diff --git a/.gitignore b/.gitignore
index f12c5532ca8b6..f553c7e473a86 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,4 @@ bazel-testlogs
 bazel-tidb
 .ijwb/
 /oom_record/
+*.log.json
diff --git a/DEPS.bzl b/DEPS.bzl
index 91adc5614dc12..ab869417dac13 100644
--- a/DEPS.bzl
+++ b/DEPS.bzl
@@ -177,13 +177,13 @@ def go_deps():
         name = "com_github_alecthomas_template",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/alecthomas/template",
-        sha256 = "25e3be7192932d130d0af31ce5bcddae887647ba4afcfb32009c3b9b79dbbdb3",
-        strip_prefix = "github.com/alecthomas/template@v0.0.0-20190718012654-fb15b899a751",
+        sha256 = "86de3337a475e323a0fb54ef03386a4e495682201f42795bd7be646c05298692",
+        strip_prefix = "github.com/alecthomas/template@v0.0.0-20160405071501-a0175ee3bccc",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip",
-            "http://ats.apps.svc/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20190718012654-fb15b899a751.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip",
+            "http://ats.apps.svc/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/alecthomas/template/com_github_alecthomas_template-v0.0.0-20160405071501-a0175ee3bccc.zip",
         ],
     )
     go_repository(
@@ -641,19 +641,6 @@ def go_deps():
             "https://storage.googleapis.com/pingcapmirror/gomod/github.com/biogo/store/com_github_biogo_store-v0.0.0-20160505134755-913427a1d5e8.zip",
         ],
     )
-    go_repository(
-        name = "com_github_bketelsen_crypt",
-        build_file_proto_mode = "disable_global",
-        importpath = "github.com/bketelsen/crypt",
-        sha256 = "3df95e9bd6b8861009176bc5e4f5ebc6b0ff9857df6c1b3a8ece4fb595da02e7",
-        strip_prefix = "github.com/bketelsen/crypt@v0.0.3-0.20200106085610-5cbc8cc4026c",
-        urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip",
-            "http://ats.apps.svc/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/bketelsen/crypt/com_github_bketelsen_crypt-v0.0.3-0.20200106085610-5cbc8cc4026c.zip",
-        ],
-    )
     go_repository(
         name = "com_github_bkielbasa_cyclop",
         build_file_proto_mode = "disable_global",
@@ -823,6 +810,19 @@ def go_deps():
             "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cenk/backoff/com_github_cenk_backoff-v2.0.0+incompatible.zip",
         ],
     )
+    go_repository(
+        name = "com_github_cenkalti_backoff_v4",
+        build_file_proto_mode = "disable_global",
+        importpath = "github.com/cenkalti/backoff/v4",
+        sha256 = "de69f5db190ee0f2c441e50e4bf607853ab99512a183a5713803888ced502dde",
+        strip_prefix = "github.com/cenkalti/backoff/v4@v4.1.1",
+        urls = [
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip",
+            "http://ats.apps.svc/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cenkalti/backoff/v4/com_github_cenkalti_backoff_v4-v4.1.1.zip",
+        ],
+    )
     go_repository(
         name = "com_github_census_instrumentation_opencensus_proto",
         build_file_proto_mode = "disable_global",
@@ -840,13 +840,13 @@ def go_deps():
         name = "com_github_certifi_gocertifi",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/certifi/gocertifi",
-        sha256 = "11d525844c3dd711fb0ae31acc9ebd8a4d602215f14ff24ad1764ecb48464849",
-        strip_prefix = "github.com/certifi/gocertifi@v0.0.0-20200922220541-2c3bb06c6054",
+        sha256 = "e007c669f49757301c34b7c5bc4a37f0fbe3707ed123995728cb814217fae2f7",
+        strip_prefix = "github.com/certifi/gocertifi@v0.0.0-20180905225744-ee1a9a0726d2",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip",
-            "http://ats.apps.svc/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20200922220541-2c3bb06c6054.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip",
+            "http://ats.apps.svc/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/certifi/gocertifi/com_github_certifi_gocertifi-v0.0.0-20180905225744-ee1a9a0726d2.zip",
         ],
     )
     go_repository(
@@ -1035,13 +1035,13 @@ def go_deps():
         name = "com_github_cncf_xds_go",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/cncf/xds/go",
-        sha256 = "7e33dbf929da89661e8f7507706f7ea28762d7c48c899d8e8352145c11627bf4",
-        strip_prefix = "github.com/cncf/xds/go@v0.0.0-20230105202645-06c439db220b",
+        sha256 = "a0c6e66eade357aeda4edaa9d09612085860dc4c0b44edf8226574939bdf6091",
+        strip_prefix = "github.com/cncf/xds/go@v0.0.0-20230607035331-e9ce68804cb4",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip",
-            "http://ats.apps.svc/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230105202645-06c439db220b.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip",
+            "http://ats.apps.svc/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cncf/xds/go/com_github_cncf_xds_go-v0.0.0-20230607035331-e9ce68804cb4.zip",
         ],
     )
     go_repository(
@@ -1100,13 +1100,13 @@ def go_deps():
         name = "com_github_cockroachdb_datadriven",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/cockroachdb/datadriven",
-        sha256 = "27661be7dc3cff4288f9a150f7e82fad6bb53382bb8d87bcfe8b22a85732c414",
-        strip_prefix = "github.com/cockroachdb/datadriven@v1.0.0",
+        sha256 = "1818b828715b773ea9eaf415fa3cc176c411e18f645ec85440b14abaf1f387c4",
+        strip_prefix = "github.com/cockroachdb/datadriven@v1.0.2",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip",
-            "http://ats.apps.svc/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.0.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip",
+            "http://ats.apps.svc/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/cockroachdb/datadriven/com_github_cockroachdb_datadriven-v1.0.2.zip",
         ],
     )
     go_repository(
@@ -1252,30 +1252,17 @@ def go_deps():
             "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coocood/rtutil/com_github_coocood_rtutil-v0.0.0-20190304133409-c84515f646f2.zip",
         ],
     )
-    go_repository(
-        name = "com_github_coreos_bbolt",
-        build_file_proto_mode = "disable_global",
-        importpath = "github.com/coreos/bbolt",
-        sha256 = "097e7c6cf2dc9c50a0c8827f451bd3cba44c2cbf086d4fb684f2dfada9bfa841",
-        strip_prefix = "github.com/coreos/bbolt@v1.3.2",
-        urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/bbolt/com_github_coreos_bbolt-v1.3.2.zip",
-        ],
-    )
     go_repository(
         name = "com_github_coreos_etcd",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/coreos/etcd",
-        sha256 = "c32b3fc5dba0eeb8533d628489cf862c4eb360644d79c597bcc6290f3d74b046",
-        strip_prefix = "github.com/coreos/etcd@v3.3.13+incompatible",
+        sha256 = "5848e1797f8d426f4aa4b61b15611456fb0183f974cbf9e64a8a11e740883367",
+        strip_prefix = "github.com/coreos/etcd@v3.3.12+incompatible",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.13+incompatible.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip",
+            "http://ats.apps.svc/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/etcd/com_github_coreos_etcd-v3.3.12+incompatible.zip",
         ],
     )
     go_repository(
@@ -1295,52 +1282,26 @@ def go_deps():
         name = "com_github_coreos_go_semver",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/coreos/go-semver",
-        sha256 = "b2fc075395ffc34cff4b964681d0ae3cd22096cfcadd2970eeaa877596ceb210",
-        strip_prefix = "github.com/coreos/go-semver@v0.3.0",
-        urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.0.zip",
-        ],
-    )
-    go_repository(
-        name = "com_github_coreos_go_systemd",
-        build_file_proto_mode = "disable_global",
-        importpath = "github.com/coreos/go-systemd",
-        sha256 = "cd349df002e0900cd0a5f9648720621840164c4b530f3e3457510e7e08589307",
-        strip_prefix = "github.com/coreos/go-systemd@v0.0.0-20190321100706-95778dfbb74e",
+        sha256 = "e72820542b5913afe0a52e956e0b3834e9fbb080641fed183117f862fab74e8a",
+        strip_prefix = "github.com/coreos/go-semver@v0.3.1",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/com_github_coreos_go_systemd-v0.0.0-20190321100706-95778dfbb74e.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip",
+            "http://ats.apps.svc/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-semver/com_github_coreos_go_semver-v0.3.1.zip",
         ],
     )
     go_repository(
         name = "com_github_coreos_go_systemd_v22",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/coreos/go-systemd/v22",
-        sha256 = "01134ae89bf4a91c17eeb1f8425e1064f9bde64cf3ce0c9cf546a9fa1ee25e64",
-        strip_prefix = "github.com/coreos/go-systemd/v22@v22.3.2",
+        sha256 = "4c44e3a6b84de4db393e341537c7124031fa98d5f98860ad31b32b4890f2234c",
+        strip_prefix = "github.com/coreos/go-systemd/v22@v22.5.0",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.3.2.zip",
-        ],
-    )
-    go_repository(
-        name = "com_github_coreos_pkg",
-        build_file_proto_mode = "disable_global",
-        importpath = "github.com/coreos/pkg",
-        sha256 = "7fe161d49439a9b4136c932233cb4b803b9e3ac7ee46f39ce247defc4f4ea8d7",
-        strip_prefix = "github.com/coreos/pkg@v0.0.0-20180928190104-399ea9e2e55f",
-        urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip",
-            "http://ats.apps.svc/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/pkg/com_github_coreos_pkg-v0.0.0-20180928190104-399ea9e2e55f.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip",
+            "http://ats.apps.svc/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/coreos/go-systemd/v22/com_github_coreos_go_systemd_v22-v22.5.0.zip",
         ],
     )
     go_repository(
@@ -1776,26 +1737,26 @@ def go_deps():
         name = "com_github_envoyproxy_go_control_plane",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/envoyproxy/go-control-plane",
-        sha256 = "aa0530fbbbe2d4683035547b14d58a7318f408398e10092637f20642de82c9ff",
-        strip_prefix = "github.com/envoyproxy/go-control-plane@v0.10.3",
+        sha256 = "82588fbec310f9103344442e997c4ee72104821cf69caaccc829d9d272aa0d10",
+        strip_prefix = "github.com/envoyproxy/go-control-plane@v0.11.1",
         urls = [
-            "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip",
-            "http://ats.apps.svc/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip",
-            "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip",
-            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.10.3.zip",
+            "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip",
+            "http://ats.apps.svc/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip",
+            "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip",
+            "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/go-control-plane/com_github_envoyproxy_go_control_plane-v0.11.1.zip",
         ],
     )
     go_repository(
         name = "com_github_envoyproxy_protoc_gen_validate",
         build_file_proto_mode = "disable_global",
         importpath = "github.com/envoyproxy/protoc-gen-validate",
-        sha256 = "7ca5aeb463c05869073076ec25ccc4144edd41d48971f1b5fd8cec1bf12a0d48",
-        strip_prefix = "github.com/envoyproxy/protoc-gen-validate@v0.9.1",
+        sha256 = "a79d19fb065554b214492c7619d760b94405429e7ca69ede922e968929a66ffb",
+        strip_prefix = "github.com/envoyproxy/protoc-gen-validate@v1.0.2",
         urls = [
"http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "http://ats.apps.svc/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v0.9.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "http://ats.apps.svc/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/envoyproxy/protoc-gen-validate/com_github_envoyproxy_protoc_gen_validate-v1.0.2.zip", ], ) go_repository( @@ -1993,19 +1954,6 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/fogleman/gg/com_github_fogleman_gg-v1.2.1-0.20190220221249-0403632d5b90.zip", ], ) - go_repository( - name = "com_github_form3tech_oss_jwt_go", - build_file_proto_mode = "disable_global", - importpath = "github.com/form3tech-oss/jwt-go", - sha256 = "ebe8386761761d53fac2de5f8f575ddf66c114ec9835947c761131662f1d38f3", - strip_prefix = "github.com/form3tech-oss/jwt-go@v3.2.6-0.20210809144907-32ab6a8243d7+incompatible", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "http://ats.apps.svc/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "https://cache.hawkingrei.com/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/form3tech-oss/jwt-go/com_github_form3tech_oss_jwt_go-v3.2.5+incompatible.zip", - ], - ) go_repository( name = "com_github_fortytw2_leaktest", build_file_proto_mode = "disable_global", @@ -2101,13 +2049,13 @@ def go_deps(): name = "com_github_getsentry_raven_go", build_file_proto_mode = "disable_global", importpath = "github.com/getsentry/raven-go", - sha256 = "eaffe69939612cd05f95e1846b8ddb4043655571be34cdb6412a66b41b6826eb", - strip_prefix = "github.com/getsentry/raven-go@v0.2.0", + sha256 = "99cba0dce93b1c0ca86b44787bb7a61e31da95a11773dfa197a67f4a92f75b71", + strip_prefix = "github.com/getsentry/raven-go@v0.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "http://ats.apps.svc/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.2.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + "http://ats.apps.svc/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/getsentry/raven-go/com_github_getsentry_raven_go-v0.1.2.zip", ], ) go_repository( @@ -2686,13 +2634,13 @@ def go_deps(): name = "com_github_golang_glog", build_file_proto_mode = "disable_global", importpath = "github.com/golang/glog", - sha256 = "668beb5dd923378b00fda4ba0d965000f3f259be5ba05ebd341a2949e8f20db6", - strip_prefix = "github.com/golang/glog@v1.1.0", + sha256 = "f17e7d8a4485e91373c72d7ed688b23cafe647cd4e2bb8de669e39a35432fbec", + strip_prefix = "github.com/golang/glog@v1.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "http://ats.apps.svc/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "http://ats.apps.svc/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang/glog/com_github_golang_glog-v1.1.2.zip", ], ) go_repository( @@ -2725,13 +2673,13 @@ def go_deps(): name = "com_github_golang_jwt_jwt_v4", build_file_proto_mode = "disable_global", importpath = "github.com/golang-jwt/jwt/v4", - sha256 = "bea2e7c045b07f50b60211bee94b62c442322ded7fa893e3fda49dcdce0e2908", - strip_prefix = "github.com/golang-jwt/jwt/v4@v4.2.0", + sha256 = "331efc33198957256c57258caf96199fec534d0c0849da303a11fb013b47d101", + strip_prefix = "github.com/golang-jwt/jwt/v4@v4.4.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - "http://ats.apps.svc/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.2.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "http://ats.apps.svc/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "https://cache.hawkingrei.com/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/golang-jwt/jwt/v4/com_github_golang_jwt_jwt_v4-v4.4.2.zip", ], ) go_repository( @@ -3102,6 +3050,19 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/renameio/v2/com_github_google_renameio_v2-v2.0.0.zip", ], ) + go_repository( + name = "com_github_google_s2a_go", + build_file_proto_mode = "disable_global", + importpath = "github.com/google/s2a-go", + sha256 = "b01ff39fc8c27f944da1c3f78106d57e165f234d98115c344f448b603ae24ff3", + strip_prefix = "github.com/google/s2a-go@v0.1.4", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + "http://ats.apps.svc/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + "https://cache.hawkingrei.com/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/s2a-go/com_github_google_s2a_go-v0.1.4.zip", + ], + ) go_repository( name = "com_github_google_skylark", build_file_proto_mode = "disable_global", @@ -3119,39 +3080,39 @@ def go_deps(): name = "com_github_google_uuid", build_file_proto_mode = "disable_global", importpath = "github.com/google/uuid", - sha256 = "0a5fcc05ea492afeaca984a012485f6a15e2259b32f1206d6f36a88c88afc607", - strip_prefix = "github.com/google/uuid@v1.3.0", + sha256 = "9d9d6cfb28ce6dbe4b518c42c6bccd67bb531a106859808f36e82a5c3fb8c64d", + strip_prefix = "github.com/google/uuid@v1.3.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "http://ats.apps.svc/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "http://ats.apps.svc/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/google/uuid/com_github_google_uuid-v1.3.1.zip", ], ) go_repository( name = "com_github_googleapis_enterprise_certificate_proxy", build_file_proto_mode = "disable_global", importpath = "github.com/googleapis/enterprise-certificate-proxy", - sha256 = "e3a5b32ca7fc4f8bc36274d87c3547975a2b0603b2a1e4b1129530504d9ddeb7", - strip_prefix = "github.com/googleapis/enterprise-certificate-proxy@v0.2.3", + sha256 = "f0642434f18b33f21c5b2a908907f4c3ae24223791c1c4b92d13d351bfa7ed7e", + strip_prefix = "github.com/googleapis/enterprise-certificate-proxy@v0.2.4", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - "http://ats.apps.svc/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - "https://cache.hawkingrei.com/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.3.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "http://ats.apps.svc/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "https://cache.hawkingrei.com/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/enterprise-certificate-proxy/com_github_googleapis_enterprise_certificate_proxy-v0.2.4.zip", ], ) go_repository( name = "com_github_googleapis_gax_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/googleapis/gax-go/v2", - sha256 = "b9bdfe36843cdc62b1eb2ba66ac1410164c2478c88c6bfe16c9ce2859922ee80", - strip_prefix = "github.com/googleapis/gax-go/v2@v2.7.1", + sha256 = 
"10ad5944b8bcce3f2cb9a215a0dda163de5b1f092e61b74a4e162d1eb8f7f7a2", + strip_prefix = "github.com/googleapis/gax-go/v2@v2.12.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "http://ats.apps.svc/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "https://cache.hawkingrei.com/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + "http://ats.apps.svc/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + "https://cache.hawkingrei.com/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/googleapis/gax-go/v2/com_github_googleapis_gax_go_v2-v2.12.0.zip", ], ) go_repository( @@ -4480,13 +4441,13 @@ def go_deps(): name = "com_github_konsorten_go_windows_terminal_sequences", build_file_proto_mode = "disable_global", importpath = "github.com/konsorten/go-windows-terminal-sequences", - sha256 = "429b01413b972b108ea86bbde3d5e660913f3e8099190d07ccfb2f186bc6d837", - strip_prefix = "github.com/konsorten/go-windows-terminal-sequences@v1.0.3", + sha256 = "7fd0273fc0855ed08172c150f756e708d6e43c4a6d52ca4939a8b43d03356091", + strip_prefix = "github.com/konsorten/go-windows-terminal-sequences@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "http://ats.apps.svc/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "https://cache.hawkingrei.com/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.3.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + "http://ats.apps.svc/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/konsorten/go-windows-terminal-sequences/com_github_konsorten_go_windows_terminal_sequences-v1.0.1.zip", ], ) go_repository( @@ -6027,13 +5988,13 @@ def go_deps(): name = "com_github_prometheus_client_golang", build_file_proto_mode = "disable_global", importpath = "github.com/prometheus/client_golang", - sha256 = "0167cee686b836da39815e4a7ea64ecc245f6a3fb9b3c3f729941ed55da7dd4f", - strip_prefix = "github.com/prometheus/client_golang@v1.16.0", + sha256 = "db3c3279e5f3377cc21bf7f353ba67a7472321fad5562990cd55adc2127538f9", + strip_prefix = "github.com/prometheus/client_golang@v1.17.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - 
"http://ats.apps.svc/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - "https://cache.hawkingrei.com/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.16.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "http://ats.apps.svc/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "https://cache.hawkingrei.com/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/prometheus/client_golang/com_github_prometheus_client_golang-v1.17.0.zip", ], ) go_repository( @@ -7067,13 +7028,13 @@ def go_deps(): name = "com_github_tikv_client_go_v2", build_file_proto_mode = "disable_global", importpath = "github.com/tikv/client-go/v2", - sha256 = "e4953948d2346bf26d95fcc860b612c7c1d86f07c80a754db2551067912d37c8", - strip_prefix = "github.com/tikv/client-go/v2@v2.0.8-0.20231010061802-07432ef6c031", + sha256 = "b689432454a504f8ba1ad166ebf901584155edc64eed4119a30c07ab52e3af8f", + strip_prefix = "github.com/tikv/client-go/v2@v2.0.8-0.20231030120815-1362f1e87566", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231010061802-07432ef6c031.zip", - "http://ats.apps.svc/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231010061802-07432ef6c031.zip", - "https://cache.hawkingrei.com/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231010061802-07432ef6c031.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231010061802-07432ef6c031.zip", + "http://bazel-cache.pingcap.net:8080/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030120815-1362f1e87566.zip", + "http://ats.apps.svc/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030120815-1362f1e87566.zip", + "https://cache.hawkingrei.com/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030120815-1362f1e87566.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/github.com/tikv/client-go/v2/com_github_tikv_client_go_v2-v2.0.8-0.20231030120815-1362f1e87566.zip", ], ) go_repository( @@ -7678,351 +7639,338 @@ def go_deps(): name = "com_google_cloud_go", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go", - sha256 = "8bdce0d7bfc07e71cebbbd7df2d93d1418a35eed09211bb21e3c1ee8d2fabf7c", - strip_prefix = "cloud.google.com/go@v0.110.0", + sha256 = "3d0ed6092ddd6ffdc4ec4f39e627a706c8d71e09330768c8174428db289d21a4", + strip_prefix = "cloud.google.com/go@v0.110.8", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", 
+ "https://cache.hawkingrei.com/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/com_google_cloud_go-v0.110.8.zip", ], ) go_repository( name = "com_google_cloud_go_accessapproval", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/accessapproval", - sha256 = "4fd31c02273e95e4032c7652822e740dbf074d77d66002df0fb96c1222fd0d1e", - strip_prefix = "cloud.google.com/go/accessapproval@v1.6.0", + sha256 = "e81216a40f4ed1779d4fd2a031ba6df523c9dc4cbe459ed8e746b6044c865248", + strip_prefix = "cloud.google.com/go/accessapproval@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accessapproval/com_google_cloud_go_accessapproval-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_accesscontextmanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/accesscontextmanager", - sha256 = "90230ccc20b02821de0ef578914c7c32ac3189ebcce539da521228df768fa4f1", - strip_prefix = "cloud.google.com/go/accesscontextmanager@v1.7.0", + sha256 = "4010c95bbdceab4050e4b931bdbe978f50e83de78034c9671a23c414bdb97dc3", + strip_prefix = "cloud.google.com/go/accesscontextmanager@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/accesscontextmanager/com_google_cloud_go_accesscontextmanager-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_aiplatform", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/aiplatform", - sha256 = "e61385ceceb7eb9ef93c80daf51787f083470f104d113c8460794744a853c927", - strip_prefix = "cloud.google.com/go/aiplatform@v1.37.0", + sha256 = 
"9f49dbeaf81d8bf076b2a23f3d2c822f4d60ab41997ca8e9db081ad2e0945e42", + strip_prefix = "cloud.google.com/go/aiplatform@v1.51.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.37.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/aiplatform/com_google_cloud_go_aiplatform-v1.51.1.zip", ], ) go_repository( name = "com_google_cloud_go_analytics", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/analytics", - sha256 = "b2c08e99d317393ea9102cbb4f309d16170790a793b95eeafd026f8263281b3f", - strip_prefix = "cloud.google.com/go/analytics@v0.19.0", + sha256 = "1f0b79e19aa45178c06545ac0eb1ec067583f7742520c933d25722ebdb8d1c2c", + strip_prefix = "cloud.google.com/go/analytics@v0.21.4", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.19.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/analytics/com_google_cloud_go_analytics-v0.21.4.zip", ], ) go_repository( name = "com_google_cloud_go_apigateway", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigateway", - sha256 = "81f9cf7d46093a4cf3bb6dfb7ea942784295f093261c45698656dd844bdfa163", - strip_prefix = "cloud.google.com/go/apigateway@v1.5.0", + sha256 = "d3a522706734344ff09513c72b05a8e39bbfe093f9cbab07c3c081698306b014", + strip_prefix = "cloud.google.com/go/apigateway@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", 
+ "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigateway/com_google_cloud_go_apigateway-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_apigeeconnect", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigeeconnect", - sha256 = "a0ae141afd9c762b722778b3508dcc459e18c6890a22586235dafc0f436532a2", - strip_prefix = "cloud.google.com/go/apigeeconnect@v1.5.0", + sha256 = "5d6c4ab3a4a0e921c26b073f7c29ad7dcaff23eef07bd510f2d42fc2a4bb9fc9", + strip_prefix = "cloud.google.com/go/apigeeconnect@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeconnect/com_google_cloud_go_apigeeconnect-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_apigeeregistry", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/apigeeregistry", - sha256 = "1cf7728c1b8d31247d5c2ec10b4b252d6224e9549c2ee7d2222b482dec8aeba4", - strip_prefix = "cloud.google.com/go/apigeeregistry@v0.6.0", + sha256 = "949009434d483756469a40a091e20b979fde2811df9a7f9d5955e1cceecb9b65", + strip_prefix = "cloud.google.com/go/apigeeregistry@v0.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.6.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_apikeys", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/apikeys", - sha256 = "511ba83f3837459a9e553026ecf556ebec9007403054635d90f065f7d735ddbe", - strip_prefix = "cloud.google.com/go/apikeys@v0.6.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apikeys/com_google_cloud_go_apikeys-v0.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/apigeeregistry/com_google_cloud_go_apigeeregistry-v0.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_appengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/appengine", - sha256 = "09f35ee5b9d8782bced76b733c7c3a2a5f3b9e41630236a47854b4a92567e646", - strip_prefix = "cloud.google.com/go/appengine@v1.7.1", + sha256 = "ef8ebfc267080d470ebe4bcebef59e8bae90a1418b3f03b27f334e2058d4517e", + strip_prefix = "cloud.google.com/go/appengine@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/appengine/com_google_cloud_go_appengine-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_area120", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/area120", - sha256 = "7dcfdf365eb9f29fcedf29b8e32f0023b829732869dc7ad9a2cd8450cbdea8df", - strip_prefix = "cloud.google.com/go/area120@v0.7.1", + sha256 = "04c79c0f28dce15cc4c3ff476995e9691431417b85293b8b202923ea85c2bab5", + strip_prefix = "cloud.google.com/go/area120@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/area120/com_google_cloud_go_area120-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_artifactregistry", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/artifactregistry", - sha256 = "abf73586bdced0f590918b37f19643646c3aa04a651480cbdbfad86171f03d98", - strip_prefix = "cloud.google.com/go/artifactregistry@v1.13.0", + sha256 = "121b1aba80e678166214cdcf45093fdface59a86ff7a930f3a44381e9c3c2f43", + strip_prefix = "cloud.google.com/go/artifactregistry@v1.14.3", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/artifactregistry/com_google_cloud_go_artifactregistry-v1.14.3.zip", ], ) go_repository( name = "com_google_cloud_go_asset", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/asset", - sha256 = "dcaee2c49835e7f9c53d77b21738d4d803e25b2b52dc4c71c5e145332fead841", - strip_prefix = "cloud.google.com/go/asset@v1.13.0", + sha256 = "8c99032799a39f65d87c1ed91bbaebe5ed2b84675231933106b0a8b48997214a", + strip_prefix = "cloud.google.com/go/asset@v1.15.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/asset/com_google_cloud_go_asset-v1.15.1.zip", ], ) go_repository( name = "com_google_cloud_go_assuredworkloads", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/assuredworkloads", - sha256 = "f82b2f4ba2d692deff3ccf7dacfc23e744d70804f55fbb34affee7552da4f730", - strip_prefix = "cloud.google.com/go/assuredworkloads@v1.10.0", + sha256 = "31788ef9db908faea41ecbd7f8d49246d47e77897029f153df641759f9456e78", + strip_prefix = "cloud.google.com/go/assuredworkloads@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/assuredworkloads/com_google_cloud_go_assuredworkloads-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_automl", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/automl", - sha256 = "e8a1b910ab247a441ad74592d93d4c37721d7ecfde2dcd7afceeaffab0505574", - strip_prefix = "cloud.google.com/go/automl@v1.12.0", + sha256 = "2d4aea70974f6409654bad3125ae1d80b810a1cb1777aee622c8502dd52c6693", + strip_prefix = "cloud.google.com/go/automl@v1.13.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/automl/com_google_cloud_go_automl-v1.13.2.zip", ], ) go_repository( name = "com_google_cloud_go_baremetalsolution", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/baremetalsolution", - sha256 = "f3bdfc95c4743654198599087e86063428d823b10c8f4b59260376255403d3a6", - strip_prefix = "cloud.google.com/go/baremetalsolution@v0.5.0", + sha256 = "79a2af9446dad6522ffaf60e3da8f564813b6a3ec7d71f43080f86e49bf90460", + strip_prefix = "cloud.google.com/go/baremetalsolution@v1.2.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/baremetalsolution/com_google_cloud_go_baremetalsolution-v1.2.1.zip", ], ) go_repository( name = "com_google_cloud_go_batch", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/batch", - sha256 = "9b7fda9ddd263f3cb57afe020014bb4153736e13656dd39896088bda972b3f8c", - strip_prefix = "cloud.google.com/go/batch@v0.7.0", + sha256 = "d06b057177356f5d91140f59c2015a295f98f040f81d54b25f0d0bff230e3b2a", + strip_prefix = "cloud.google.com/go/batch@v1.5.1", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/batch/com_google_cloud_go_batch-v1.5.1.zip", ], ) go_repository( name = "com_google_cloud_go_beyondcorp", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/beyondcorp", - sha256 = "6ff3ee86f910355281d4fccbf476922447ea6ba33579e5d40c7dcec407dfdf1a", - strip_prefix = "cloud.google.com/go/beyondcorp@v0.5.0", + sha256 = "e7497be44bc10e4c468a3b100f65ae8e3d351034544c2feb1447f54300659bfd", + strip_prefix = "cloud.google.com/go/beyondcorp@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/beyondcorp/com_google_cloud_go_beyondcorp-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_bigquery", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/bigquery", - sha256 = "3866e7d059fb9fb91f5323bc2061aded6834162d76e476da27ab64e48c2a6755", - strip_prefix = "cloud.google.com/go/bigquery@v1.50.0", + sha256 = "e876ce8407d288df3075f142c19c429540a7a917b1fdd6dd68b3438ad8349412", + strip_prefix = "cloud.google.com/go/bigquery@v1.56.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.50.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/bigquery/com_google_cloud_go_bigquery-v1.56.0.zip", ], ) 
go_repository( name = "com_google_cloud_go_billing", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/billing", - sha256 = "6a1422bb60b43683d1b5d1be3eacd1992b1bb656e187cec3e398c9d27299eadb", - strip_prefix = "cloud.google.com/go/billing@v1.13.0", + sha256 = "fecfc00cf9ea70a1109ec8e1a190e328fb1cc2f0159b1d4b10111cd5651b2bae", + strip_prefix = "cloud.google.com/go/billing@v1.17.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/billing/com_google_cloud_go_billing-v1.17.2.zip", ], ) go_repository( name = "com_google_cloud_go_binaryauthorization", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/binaryauthorization", - sha256 = "4a5d9c61a748d7b2dc14542c66f033701694e537b954619fb70f53aa1f31263f", - strip_prefix = "cloud.google.com/go/binaryauthorization@v1.5.0", + sha256 = "afb102bcbd2836c1371d9e6a179da9109cdaa5c41a286d73ee6c93d3ae775736", + strip_prefix = "cloud.google.com/go/binaryauthorization@v1.7.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/binaryauthorization/com_google_cloud_go_binaryauthorization-v1.7.1.zip", ], ) go_repository( name = "com_google_cloud_go_certificatemanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/certificatemanager", - sha256 = "28c924f5edcc34f79ae7e7542a0179b0f49457f9ce6e89c86336fe5be2fdb8ac", - strip_prefix = "cloud.google.com/go/certificatemanager@v1.6.0", + sha256 = "877ddd1a5c8e2efa94f0055b5371306eb07cf4cd52d5a70c15e0c38d6f6d8e32", + strip_prefix = "cloud.google.com/go/certificatemanager@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/certificatemanager/com_google_cloud_go_certificatemanager-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_channel", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/channel", - sha256 = "097f8225139cc2f3d4676e6b78d1d4cdbfd0f5558e1ab3a66ded9a085700d4b2", - strip_prefix = "cloud.google.com/go/channel@v1.12.0", + sha256 = "fd6990eda15ff2f698c8f09db37f5ba11d3a39b89fae50b6231c9ae2eae4a768", + strip_prefix = "cloud.google.com/go/channel@v1.17.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/channel/com_google_cloud_go_channel-v1.17.1.zip", ], ) go_repository( name = "com_google_cloud_go_cloudbuild", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/cloudbuild", - sha256 = "80d00c57b4b55e71e45e4c7427ee0da0aae082fc0b7be0fcdc2d756a71b9d8b3", - strip_prefix = "cloud.google.com/go/cloudbuild@v1.9.0", + sha256 = "f3e6b2f036308af4749695e059c274459a89a3329785a7a68492e85ecb5a5a22", + strip_prefix = "cloud.google.com/go/cloudbuild@v1.14.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudbuild/com_google_cloud_go_cloudbuild-v1.14.1.zip", ], ) go_repository( name = "com_google_cloud_go_clouddms", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/clouddms", - sha256 = "9a9488b44e7a18811c0fcb13beb1fe9c3c5f7613b3109734af6f88af19843d90", - strip_prefix = "cloud.google.com/go/clouddms@v1.5.0", + sha256 = "bbdc27660a1a25cbe7f5c8d2ef3b87fc97910178f43a2ba037f7735b5939b186", + strip_prefix = "cloud.google.com/go/clouddms@v1.7.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/clouddms/com_google_cloud_go_clouddms-v1.7.1.zip", ], ) go_repository( name = "com_google_cloud_go_cloudtasks", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/cloudtasks", - sha256 = "9219724339007e7278d19a293285dcb45f4a38addc31d9722c98ce0b8095efe5", - strip_prefix = "cloud.google.com/go/cloudtasks@v1.10.0", + sha256 = "4a4c78416add083ce1b7a8e82138f08f5940c132a1a8705c97e4523f5fbd91e9", + strip_prefix = "cloud.google.com/go/cloudtasks@v1.12.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/cloudtasks/com_google_cloud_go_cloudtasks-v1.12.2.zip", ], ) go_repository( name = "com_google_cloud_go_compute", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/compute", - sha256 = "789696687da53dd22d22c5c49e0cc0636a44703459992236d18495e79d9b9c03", - strip_prefix = "cloud.google.com/go/compute@v1.19.0", + sha256 = "cde4afb8f4c63ff4b1b17feb0a9ae75abbd0fbfdd9e94ffa0eaaf300e803dce7", + strip_prefix = "cloud.google.com/go/compute@v1.23.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.19.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/compute/com_google_cloud_go_compute-v1.23.1.zip", ], ) go_repository( @@ -8042,247 +7990,247 @@ def go_deps(): name = "com_google_cloud_go_contactcenterinsights", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/contactcenterinsights", - sha256 = "e06630e09b6ee01e3693ff079ee6279de32566ae29fefeacdd410c61e1a1a5fe", - strip_prefix = "cloud.google.com/go/contactcenterinsights@v1.6.0", + sha256 = "9e08c7acf5ffb2fcff937872cad37d7e1a2dc1b7d0d70aa450beb7cb21c61b1c", + strip_prefix = "cloud.google.com/go/contactcenterinsights@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/contactcenterinsights/com_google_cloud_go_contactcenterinsights-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_container", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/container", - sha256 = "2dfba11e311b5dc9ea7e8b60cfd2dff3b060564a845bdac98945173dc3ef12ac", - strip_prefix = "cloud.google.com/go/container@v1.15.0", + sha256 = "f93f3636acb226560294d803e2f0b563e5ea5a0383707343ddd40c3287e53f43", + strip_prefix = "cloud.google.com/go/container@v1.26.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.15.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/container/com_google_cloud_go_container-v1.26.1.zip", ], ) go_repository( name = "com_google_cloud_go_containeranalysis", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/containeranalysis", - sha256 = "6319d5102b56fa4c4576fb3aa9b4aeb30f1c3f5e45bccd747d0da27ccfceb147", - strip_prefix = "cloud.google.com/go/containeranalysis@v0.9.0", + sha256 = "afe6c1616e50df702c99867da30e415b6fcaa6212175b4552c8fba41b171e803", + strip_prefix = "cloud.google.com/go/containeranalysis@v0.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/containeranalysis/com_google_cloud_go_containeranalysis-v0.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_datacatalog", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datacatalog", - sha256 = "2e79aaa321c13a3cd5d536aa5d8d295afacb03752862c4e78bcfc8ce99501ca6", - strip_prefix = "cloud.google.com/go/datacatalog@v1.13.0", + sha256 = "92d8c36abbd4a2224889e077ca5cfdf25ec9eecbbfd08e3c77817bbdfa414947", + strip_prefix = "cloud.google.com/go/datacatalog@v1.18.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datacatalog/com_google_cloud_go_datacatalog-v1.18.1.zip", ], ) go_repository( name = "com_google_cloud_go_dataflow", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataflow", - sha256 = "f20f98ca4fb97f9c027f2e56edf7effe2c95f59d7d5a230dfa3be525fa130595", - strip_prefix = "cloud.google.com/go/dataflow@v0.8.0", + sha256 = "1b26af290f0f57e70e5ddf886e44c4e6e1d4c209819c3dcc698c199ff51ef00d", + strip_prefix = "cloud.google.com/go/dataflow@v0.9.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataflow/com_google_cloud_go_dataflow-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_dataform", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataform", - sha256 = "2867f6d78bb34adf8e295fb2158ad2df352cd28d79aa0c6e509dd5a389e04692", - strip_prefix = "cloud.google.com/go/dataform@v0.7.0", + sha256 = "da816fdd03b9c9240c1a62adeb2aed112e3bf68f775e45944791c8a912c4a69e", + strip_prefix = "cloud.google.com/go/dataform@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataform/com_google_cloud_go_dataform-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_datafusion", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datafusion", - sha256 = "9d12d5f177f6db6980afa69a9547e7653276bbb85821404d8856d432c56706bb", - strip_prefix = "cloud.google.com/go/datafusion@v1.6.0", + sha256 = "ea3e48d218c57cbfb2e2bbdab083d4218f918da3864c723fa36cd36af8dacf7e", + strip_prefix = "cloud.google.com/go/datafusion@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datafusion/com_google_cloud_go_datafusion-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_datalabeling", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datalabeling", - sha256 = "9a7084aa65112251f45ed12f3118a33667fb5e90bbd14ddc64c9c64655aee9f0", - strip_prefix = "cloud.google.com/go/datalabeling@v0.7.0", + sha256 = "51e5e5eb727485adc627c9ef3031b27747600804a7a2ae42275f2c4475dfab64", + strip_prefix = "cloud.google.com/go/datalabeling@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datalabeling/com_google_cloud_go_datalabeling-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_dataplex", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataplex", - sha256 = "047519cc76aedf7b0ddb4e3145d9e96d88bc10776ef9252daa43acd25c367911", - strip_prefix = "cloud.google.com/go/dataplex@v1.6.0", + sha256 = "a2bcf17307e944d70a8745586df8e57f8a46feb464607715b2d916d69ea3431d", + strip_prefix = "cloud.google.com/go/dataplex@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataplex/com_google_cloud_go_dataplex-v1.10.1.zip", ], ) go_repository( - name = "com_google_cloud_go_dataproc", + name = "com_google_cloud_go_dataproc_v2", build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/dataproc", - sha256 = "f4adc94c30406a2bd04b62f2a0c8c33ddb605ffda53024b034e5c136407f0c73", - strip_prefix = "cloud.google.com/go/dataproc@v1.12.0", + importpath = "cloud.google.com/go/dataproc/v2", + sha256 = "9f170a095d8a5b7976ce660a31414882a8f38d4443c3cf05d70fe28ceab1e985", + strip_prefix = "cloud.google.com/go/dataproc/v2@v2.2.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataproc/com_google_cloud_go_dataproc-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataproc/v2/com_google_cloud_go_dataproc_v2-v2.2.1.zip", ], ) go_repository( name = "com_google_cloud_go_dataqna", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dataqna", - sha256 = "20e60cfe78e1b2f72122cf44184d8e9a9af7bdfc9e44a2c33e4b782dee477d25", - strip_prefix = "cloud.google.com/go/dataqna@v0.7.0", + sha256 = "69fcb32f4dc8b37e5a1a9e6fd3c33e64953ea06cb91fcfbcf59cafa31dfa8d86", + strip_prefix = "cloud.google.com/go/dataqna@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dataqna/com_google_cloud_go_dataqna-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_datastore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/datastore", - sha256 = "6b81cf09ce8daee02c880343ff82acfefbd3c7b67ff2b93bf9f1479c5e25f627", - strip_prefix = "cloud.google.com/go/datastore@v1.11.0", + sha256 = "8b89b61b9655adcfb197079184d0438dc15fc12aa7c3ef72f61fa8ddbad22880", + strip_prefix = "cloud.google.com/go/datastore@v1.15.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastore/com_google_cloud_go_datastore-v1.15.0.zip", ], ) go_repository( name = "com_google_cloud_go_datastream", build_file_proto_mode = 
"disable_global", importpath = "cloud.google.com/go/datastream", - sha256 = "02571fbbe7aa5052c91c2b99f3c799dc278bbe001871036101959338e789800c", - strip_prefix = "cloud.google.com/go/datastream@v1.7.0", + sha256 = "29df6dde384fe4c964970ef77462fd939b8c45d49ff7cb82fbc39596f8e34893", + strip_prefix = "cloud.google.com/go/datastream@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/datastream/com_google_cloud_go_datastream-v1.10.1.zip", ], ) go_repository( name = "com_google_cloud_go_deploy", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/deploy", - sha256 = "9bf6d2ad426d9d80636ca5b7c1486b91a8e31c61a50a79856195fdad65bda004", - strip_prefix = "cloud.google.com/go/deploy@v1.8.0", + sha256 = "f7555f1cd13f36ae70982f4729531176cf322a75c6b9dde6c72f8a843d525481", + strip_prefix = "cloud.google.com/go/deploy@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/deploy/com_google_cloud_go_deploy-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_dialogflow", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dialogflow", - sha256 = "de2009a08b3db53b7292852a7c28dd52218c8fcb7937fc0049b0219e429bafdb", - strip_prefix = "cloud.google.com/go/dialogflow@v1.32.0", + sha256 = "9a17b92cec11b7877f38b7d31bb42928710b87f55b54c703b0858a86bf26421f", + strip_prefix = "cloud.google.com/go/dialogflow@v1.44.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.32.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dialogflow/com_google_cloud_go_dialogflow-v1.44.1.zip", ], ) go_repository( name = "com_google_cloud_go_dlp", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/dlp", - sha256 = "a32c4dbda0445a401ec25e9faf3f10b25b6fd264917825a0d053e6e297cdfc61", - strip_prefix = "cloud.google.com/go/dlp@v1.9.0", + sha256 = "787fb0c860a5a6df47080feb59dc34bb26fc23fed5a98d8fc7b42636bc81b5ac", + strip_prefix = "cloud.google.com/go/dlp@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/dlp/com_google_cloud_go_dlp-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_documentai", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/documentai", - sha256 = "9806274a2a5af71b115ddc7357be24757b0331b1661cac642f7d0eb6b6894a7b", - strip_prefix = "cloud.google.com/go/documentai@v1.18.0", + sha256 = "d306b29ea9ed00003eb4a72de10527a9898b32a67a5d9b1c028845db6ee977e6", + strip_prefix = "cloud.google.com/go/documentai@v1.23.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.18.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/documentai/com_google_cloud_go_documentai-v1.23.2.zip", ], ) go_repository( name = "com_google_cloud_go_domains", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/domains", - sha256 = "26ed447b319c064d0ce19d85c6de127af1aa87c727af6202b1f7a3b95d35bd0a", - strip_prefix = "cloud.google.com/go/domains@v0.8.0", + sha256 = "541b361f96b0f849324e794b2b24f53aa73e30dbb1ba02f12e94b5dd38759db7", + strip_prefix = "cloud.google.com/go/domains@v0.9.2", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/domains/com_google_cloud_go_domains-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_edgecontainer", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/edgecontainer", - sha256 = "c22e2f212fcfcf9f0af32c43c47b4311fc07c382e78810a34afe273ba363429c", - strip_prefix = "cloud.google.com/go/edgecontainer@v1.0.0", + sha256 = "6671ed73144587cd1f5d20982d395a8628ad130ffea7d064d790e2c92274b3b3", + strip_prefix = "cloud.google.com/go/edgecontainer@v1.1.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.0.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/edgecontainer/com_google_cloud_go_edgecontainer-v1.1.2.zip", ], ) go_repository( @@ -8302,767 +8250,715 @@ def go_deps(): name = "com_google_cloud_go_essentialcontacts", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/essentialcontacts", - sha256 = "b595846269076fbabcee96eda6718c41c1b94c2758edc42537f490accaa40b19", - strip_prefix = "cloud.google.com/go/essentialcontacts@v1.5.0", + sha256 = "100839140d920ea39df237c99782dae60aa6827be723a8f17dcd77f29ff71eca", + strip_prefix = "cloud.google.com/go/essentialcontacts@v1.6.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/essentialcontacts/com_google_cloud_go_essentialcontacts-v1.6.3.zip", ], ) go_repository( name = "com_google_cloud_go_eventarc", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/eventarc", - sha256 = "6bdda029e620653f4dcdc10fa1099ec6b28c0e5ecbb5c1b34b58374efcc1beec", - strip_prefix = "cloud.google.com/go/eventarc@v1.11.0", + sha256 = "c1abc76d8cdebcf2fbff6a5f5c289479749713033ba188853f6156b1f3a2c575", + strip_prefix = "cloud.google.com/go/eventarc@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/eventarc/com_google_cloud_go_eventarc-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_filestore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/filestore", - sha256 = "77c99a79955f99b33988d4ce7d4656ab3bbeaef794d788ae295eccdecf799839", - strip_prefix = "cloud.google.com/go/filestore@v1.6.0", + sha256 = "ddfc413e66b4e18263d250a7bc7d2d723b4007729107f4a33efc90fafb7149ea", + strip_prefix = "cloud.google.com/go/filestore@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/filestore/com_google_cloud_go_filestore-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_firestore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/firestore", - sha256 = "f4bd0f35095358181574ae03a8bed7618fe8f50a63d54b2e49a85d71c47104c7", - strip_prefix = "cloud.google.com/go/firestore@v1.9.0", + sha256 = "4e14ba924858cda4925eccd288c8fb4ad377f227252138fe1681c0e9391ad3e0", + strip_prefix = "cloud.google.com/go/firestore@v1.13.0", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/firestore/com_google_cloud_go_firestore-v1.13.0.zip", ], ) go_repository( name = "com_google_cloud_go_functions", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/functions", - sha256 = "9635cbe16b0bf748108ce30c4686a909227d342e2ed47c1c1c45cfaa44be6d89", - strip_prefix = "cloud.google.com/go/functions@v1.13.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.13.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_gaming", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/gaming", - sha256 = "5a0680fb577f1ea1d3e815ff2e7fa22931e2c9e492e151087cdef34b1f9ece97", - strip_prefix = "cloud.google.com/go/gaming@v1.9.0", + sha256 = "34232aa309d00ffef25ac784e2e1a702c8aaf5b921152c257772f59dbe4234ee", + strip_prefix = "cloud.google.com/go/functions@v1.15.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gaming/com_google_cloud_go_gaming-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/functions/com_google_cloud_go_functions-v1.15.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkebackup", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkebackup", - sha256 = "d7a06be74c96d73dc3f032431cffd1e01656c670ed85d70da916933b4a91d85d", - strip_prefix = "cloud.google.com/go/gkebackup@v0.4.0", + sha256 = "f1617ab86d537328e3f3c36790da6d432caf00df1c60d7f7c59e49b3552296bf", + strip_prefix = "cloud.google.com/go/gkebackup@v1.3.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v0.4.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkebackup/com_google_cloud_go_gkebackup-v1.3.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkeconnect", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkeconnect", - sha256 = "37fe8da6dd9a04e90a245093f72b30dae67d511ab13a6c24db25b3ee8c547d25", - strip_prefix = "cloud.google.com/go/gkeconnect@v0.7.0", + sha256 = "009e1bb490e0091744f0d5ff7b3b7cbe5085a7795b775204fe45e80535f452ce", + strip_prefix = "cloud.google.com/go/gkeconnect@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkeconnect/com_google_cloud_go_gkeconnect-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkehub", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gkehub", - sha256 = "e44073c24ed21976762f6a13f0adad46863eec5ac1dbaa20045fc0b63e1fd2ce", - strip_prefix = "cloud.google.com/go/gkehub@v0.12.0", + sha256 = "acef02e283a877fae6242895bea73e00c655a239b6a34e6c4f26dde75214e897", + strip_prefix = "cloud.google.com/go/gkehub@v0.14.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkehub/com_google_cloud_go_gkehub-v0.14.2.zip", ], ) go_repository( name = "com_google_cloud_go_gkemulticloud", build_file_proto_mode = 
"disable_global", importpath = "cloud.google.com/go/gkemulticloud", - sha256 = "9c851d037561d6cc67c20b247c505ca9c0697dc7e85251bd756f478f473483b1", - strip_prefix = "cloud.google.com/go/gkemulticloud@v0.5.0", + sha256 = "ad4f2be30a3e031aaec680b8f4548876b024e02a835a51b4418f04c1a0d45437", + strip_prefix = "cloud.google.com/go/gkemulticloud@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v0.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gkemulticloud/com_google_cloud_go_gkemulticloud-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_gsuiteaddons", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/gsuiteaddons", - sha256 = "911963d78ba7974bd3e807888fde1879a5c871cdf3c43369eebb9778a3fdc4c1", - strip_prefix = "cloud.google.com/go/gsuiteaddons@v1.5.0", + sha256 = "c31266cc003017a841473f2eaa162d0d4a58302ac6085153c8961b8673af1b6a", + strip_prefix = "cloud.google.com/go/gsuiteaddons@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/gsuiteaddons/com_google_cloud_go_gsuiteaddons-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_iam", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iam", - sha256 = "a8236c53eb06cc21c5c972fcfc4153fbce5a44eb7a1b7c88cadc307b8768328a", - strip_prefix = "cloud.google.com/go/iam@v0.13.0", + sha256 = "56e6aba936af03c61fc21eb58f562596cadd6bacc30a07a7fb2a2516c28764bb", + strip_prefix = "cloud.google.com/go/iam@v1.1.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v0.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iam/com_google_cloud_go_iam-v1.1.3.zip", ], ) go_repository( name = "com_google_cloud_go_iap", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iap", - sha256 = "c2e76b45c74ecebad179dca0398a5279bcf47d30c35d8c347c8d59d98f944f90", - strip_prefix = "cloud.google.com/go/iap@v1.7.1", + sha256 = "70dd5562de160017ea166cbd5a959eda628b025bc6dc93a269fc183d96eec8cf", + strip_prefix = "cloud.google.com/go/iap@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.7.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iap/com_google_cloud_go_iap-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_ids", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/ids", - sha256 = "8a684da48da978ae35937cb3b9a84da1a7673789e8363501ccc317108b712913", - strip_prefix = "cloud.google.com/go/ids@v1.3.0", + sha256 = "5774a1cf5f3e09af43d38f37163c3ead590bb06119d4256e1a2670d40190094d", + strip_prefix = "cloud.google.com/go/ids@v1.4.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/ids/com_google_cloud_go_ids-v1.4.2.zip", ], ) go_repository( name = "com_google_cloud_go_iot", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/iot", - sha256 = "960bf7d2c22c0c31d9d903343672d1e949d2bb1442264c15d9de57659b51e126", - strip_prefix = "cloud.google.com/go/iot@v1.6.0", + sha256 = "4dbe9fdf18ecd066a3986acd9a98680adac6a1cf50bbac6c04432e3059570fea", + strip_prefix = "cloud.google.com/go/iot@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/iot/com_google_cloud_go_iot-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_kms", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/kms", - sha256 = "7f54a8218570636a93ea8b33843ed179b4b881f7d5aa8982912ddfdf7090ba38", - strip_prefix = "cloud.google.com/go/kms@v1.10.1", + sha256 = "89ef8ac2bde3827d875928c8911c3ac874b7f0f6d1396ba3f6f17b51448738c4", + strip_prefix = "cloud.google.com/go/kms@v1.15.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.10.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/kms/com_google_cloud_go_kms-v1.15.3.zip", ], ) go_repository( name = "com_google_cloud_go_language", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/language", - sha256 = "c66908967b2558c00ca79b31f6788a1cd5f7ba9ee24ebe109ea3b4ac1ab372a1", - strip_prefix = "cloud.google.com/go/language@v1.9.0", + sha256 = "353e525423b6547a806aea28f63f401759fd090855a1544c1228f48337470201", + strip_prefix = "cloud.google.com/go/language@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/language/com_google_cloud_go_language-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_lifesciences", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/lifesciences", - sha256 = "8638174541f6d1b8d03cce39e94d5ba7b85def5550151e69c4d54e61d60101e3", - strip_prefix = 
"cloud.google.com/go/lifesciences@v0.8.0", + sha256 = "32e38b08fcef1d06b7512261d1116fbac7e5e331942e4512a26d73f62625e5d6", + strip_prefix = "cloud.google.com/go/lifesciences@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/lifesciences/com_google_cloud_go_lifesciences-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_logging", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/logging", - sha256 = "1b56716e7440c5064ed17af2c40bbba0c2e0f1d628f9f4864e81b7bd2958a2f3", - strip_prefix = "cloud.google.com/go/logging@v1.7.0", + sha256 = "8b2275192caa4b3f260c23edcf2ae08a45e510573fca5487c7a21056fd88d3f9", + strip_prefix = "cloud.google.com/go/logging@v1.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/logging/com_google_cloud_go_logging-v1.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_longrunning", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/longrunning", - sha256 = "6cb4e4a6b80435cb12ab0192ca281893e750f20903cdf5f2432a6d61db190361", - strip_prefix = "cloud.google.com/go/longrunning@v0.4.1", + sha256 = "ba7fd6475a3e6f6335461d5a707232ccf8336397802e83c5401c2308906ee76b", + strip_prefix = "cloud.google.com/go/longrunning@v0.5.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.4.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/longrunning/com_google_cloud_go_longrunning-v0.5.2.zip", ], ) go_repository( name = "com_google_cloud_go_managedidentities", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/managedidentities", - sha256 = "6ca18f1a180e7ce3159b8c6fdf93ba66122775a112874d9ce9a7f9fca3150a95", - strip_prefix = "cloud.google.com/go/managedidentities@v1.5.0", + sha256 = "d81fe6c82e62b737a430b640c698220c61722b122b6ac9722e7b365eecd4e074", + strip_prefix = "cloud.google.com/go/managedidentities@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/managedidentities/com_google_cloud_go_managedidentities-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_maps", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/maps", - sha256 = "9988ceccfc296bc154f5cbd0ae455131ddec336e93293b07d1c5f4948653dd93", - strip_prefix = "cloud.google.com/go/maps@v0.7.0", + sha256 = "c9cb6250a7ff92586fb2e212fc3b36437c9baa74e9b373461a0b33e40d359909", + strip_prefix = "cloud.google.com/go/maps@v1.4.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/maps/com_google_cloud_go_maps-v1.4.1.zip", ], ) go_repository( name = "com_google_cloud_go_mediatranslation", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/mediatranslation", - sha256 = "e78d770431918e6653b61029adf076402e15875acaa165c0db216567abeb5e63", - strip_prefix = "cloud.google.com/go/mediatranslation@v0.7.0", + sha256 = "6805e80cc3a6615eb086c1efdbc57bd378779962d134e22509af1ef77db1eb7f", + strip_prefix = "cloud.google.com/go/mediatranslation@v0.8.2", urls = 
[ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/mediatranslation/com_google_cloud_go_mediatranslation-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_memcache", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/memcache", - sha256 = "e01bca761af97779d7a4b0d632fd0463d324b80fac75662c594dd008270ed389", - strip_prefix = "cloud.google.com/go/memcache@v1.9.0", + sha256 = "80d1544a452cdb9a051cdd577a2dc018b56a9250c54ca5df194c65855a6cf7b5", + strip_prefix = "cloud.google.com/go/memcache@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/memcache/com_google_cloud_go_memcache-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_metastore", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/metastore", - sha256 = "6ec835f8d18b39056072b7814a51cd6c22179cbf97f2b0204dc73d94082f00a4", - strip_prefix = "cloud.google.com/go/metastore@v1.10.0", + sha256 = "fe4aa0aa4abd1bd6c1cb3c6d506c3acfb58e9851cdbe91b017360e4ce6533ff9", + strip_prefix = "cloud.google.com/go/metastore@v1.13.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/metastore/com_google_cloud_go_metastore-v1.13.1.zip", ], ) go_repository( name = "com_google_cloud_go_monitoring", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/monitoring", - sha256 = "3ed009f1b492887939537dc59bea91ad78129eab5cba1fb4f090690a0f2a1f22", - strip_prefix = "cloud.google.com/go/monitoring@v1.13.0", + sha256 = "545af97f19cde57c99d37c8741d45f110a472f62e348313cef2054f8623661cd", + strip_prefix = "cloud.google.com/go/monitoring@v1.16.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/monitoring/com_google_cloud_go_monitoring-v1.16.1.zip", ], ) go_repository( name = "com_google_cloud_go_networkconnectivity", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networkconnectivity", - sha256 = "c2cd6ef6c8a4141ea70a20669000695559d3f3d41498de98c61878597cca05ea", - strip_prefix = "cloud.google.com/go/networkconnectivity@v1.11.0", + sha256 = "fa63079ab0dfcd34b074145057487d18d95e6b380b2b19c4a9a2113303333fdb", + strip_prefix = "cloud.google.com/go/networkconnectivity@v1.14.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkconnectivity/com_google_cloud_go_networkconnectivity-v1.14.1.zip", ], ) go_repository( name = "com_google_cloud_go_networkmanagement", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networkmanagement", - sha256 = "4c74b55c69b73655d14d2198be6d6e8d4da240e7284c5c99eb2a7591bb95c187", - strip_prefix = "cloud.google.com/go/networkmanagement@v1.6.0", + sha256 = "bed1460ce979230c94121f814c62aee524f223568bfcf00962e8683379016c49", 
+ strip_prefix = "cloud.google.com/go/networkmanagement@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networkmanagement/com_google_cloud_go_networkmanagement-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_networksecurity", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/networksecurity", - sha256 = "1a358f55bb3daaba03ad22fe0ecbf67f334e829f3c7412de37f85b607572cb67", - strip_prefix = "cloud.google.com/go/networksecurity@v0.8.0", + sha256 = "b4e959bd1d9c97e7267c529ee023fa55a7bbcd7b5f2964b6c99f0fb51006dbcb", + strip_prefix = "cloud.google.com/go/networksecurity@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/networksecurity/com_google_cloud_go_networksecurity-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_notebooks", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/notebooks", - sha256 = "24ca6efce18d2cb1001280ad2c3dc2a002279b258ecf5d20bf912b666b19d279", - strip_prefix = "cloud.google.com/go/notebooks@v1.8.0", + sha256 = "9f33be59f75e363bd6a90a465ada18ad8cc549b82facbb9541b81ae9b3ba7937", + strip_prefix = "cloud.google.com/go/notebooks@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.8.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/notebooks/com_google_cloud_go_notebooks-v1.10.1.zip", ], ) go_repository( name = "com_google_cloud_go_optimization", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/optimization", - sha256 = "a86473b6c76f5669e4c98ad4837a2ec77faab9bfabeb52c0f26b10019e039986", - strip_prefix = "cloud.google.com/go/optimization@v1.3.1", + sha256 = "feb9d564067168d48be5e2bde7f19032a7c27a779beefc09d3aa19f0c2b9eaf2", + strip_prefix = "cloud.google.com/go/optimization@v1.5.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.3.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/optimization/com_google_cloud_go_optimization-v1.5.1.zip", ], ) go_repository( name = "com_google_cloud_go_orchestration", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/orchestration", - sha256 = "9568ea88c1626f6d69ac48abcbd4dfab26aebe3be89a19f179bf3277bcda26e9", - strip_prefix = "cloud.google.com/go/orchestration@v1.6.0", + sha256 = "81f752cb325e335254f9c33be9d7bacdf0fbc8ef929828a0496e35b2787dc2df", + strip_prefix = "cloud.google.com/go/orchestration@v1.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orchestration/com_google_cloud_go_orchestration-v1.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_orgpolicy", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/orgpolicy", - sha256 = "6fa13831a918ac690ed1073967e210349a13c2cd9bf51f84ba5cd6522a052d32", - strip_prefix = 
"cloud.google.com/go/orgpolicy@v1.10.0", + sha256 = "2a4f68b17cf411c15e384e0bb1a7d5c823e9212129535075588cbd4c3e3da73d", + strip_prefix = "cloud.google.com/go/orgpolicy@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/orgpolicy/com_google_cloud_go_orgpolicy-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_osconfig", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/osconfig", - sha256 = "8f97d324f398aebb4af096041f8547a5b6b09cba754ba082fe3eca7f29a8b885", - strip_prefix = "cloud.google.com/go/osconfig@v1.11.0", + sha256 = "275f62c21ecf8371b55e778ab87e9ef588ced27cc63a9e985ab5029eddcdb843", + strip_prefix = "cloud.google.com/go/osconfig@v1.12.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/osconfig/com_google_cloud_go_osconfig-v1.12.2.zip", ], ) go_repository( name = "com_google_cloud_go_oslogin", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/oslogin", - sha256 = "4e1f1ec2a64a8bb7f878185b3e618bb077df6fa94ed6704ab012e18c4ecd4fce", - strip_prefix = "cloud.google.com/go/oslogin@v1.9.0", + sha256 = "29ef72254fe0efd3778f6cc8fbc3df9c33f7ce7b6045d0f6d96eb876044b2237", + strip_prefix = "cloud.google.com/go/oslogin@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/oslogin/com_google_cloud_go_oslogin-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_phishingprotection", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/phishingprotection", - sha256 = "7a3ce8e6b2c8f828fcd344b653849cf1e90abeca48a7eef81c75a72cb924d9e2", - strip_prefix = "cloud.google.com/go/phishingprotection@v0.7.0", + sha256 = "6244bb1f396e3cb42e513ddc934923f0e14ab55f40b828f16c90be57a4b94a84", + strip_prefix = "cloud.google.com/go/phishingprotection@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/phishingprotection/com_google_cloud_go_phishingprotection-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_policytroubleshooter", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/policytroubleshooter", - sha256 = "9d5fccfe01a31ec395ba3a26474168e5a8db09275dfbdfcd5dfd44923d9ac4bd", - strip_prefix = "cloud.google.com/go/policytroubleshooter@v1.6.0", + sha256 = "b46e74184e6b8ed9943f474b976fb6bedd6a4d0700ac696012300886922f9d98", + strip_prefix = "cloud.google.com/go/policytroubleshooter@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/policytroubleshooter/com_google_cloud_go_policytroubleshooter-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_privatecatalog", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/privatecatalog", - sha256 = 
"f475f487df7906e4e35bda4b69ce53f141ade7ea6463674eb9b57f5fa302c367", - strip_prefix = "cloud.google.com/go/privatecatalog@v0.8.0", + sha256 = "a43190e1dfba2ed7fcb63e5571937bdfc2ed97594fa9b2b7bd119678e977b0f4", + strip_prefix = "cloud.google.com/go/privatecatalog@v0.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/privatecatalog/com_google_cloud_go_privatecatalog-v0.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_pubsub", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/pubsub", - sha256 = "9c15c75b6204fd3d42114006896a72d82827d01a756d2f78423c101102da4977", - strip_prefix = "cloud.google.com/go/pubsub@v1.30.0", + sha256 = "87f423671714647e1817126df5403c57f1d4627c3e4e83664213a678126e40ca", + strip_prefix = "cloud.google.com/go/pubsub@v1.33.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.30.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsub/com_google_cloud_go_pubsub-v1.33.0.zip", ], ) go_repository( name = "com_google_cloud_go_pubsublite", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/pubsublite", - sha256 = "97b1c3637961faf18229a168a5811425b4e64ee6d81bb76e51ebbf93ff3622ba", - strip_prefix = "cloud.google.com/go/pubsublite@v1.7.0", + sha256 = "41933a60c5e0995025320fe1c155b31d636178e60838b04aca9eab0c8c9f3227", + strip_prefix = "cloud.google.com/go/pubsublite@v1.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.7.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/pubsublite/com_google_cloud_go_pubsublite-v1.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_recaptchaenterprise_v2", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recaptchaenterprise/v2", - sha256 = "dbf218232a443651daa58869fb5e87845927c33d683f4fd4f6f4306e056bb7d0", - strip_prefix = "cloud.google.com/go/recaptchaenterprise/v2@v2.7.0", + sha256 = "8151e658e29acc0617fa5bc36f7d6f06a61e8b97558f79fd6137429538ad903f", + strip_prefix = "cloud.google.com/go/recaptchaenterprise/v2@v2.8.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recaptchaenterprise/v2/com_google_cloud_go_recaptchaenterprise_v2-v2.8.1.zip", ], ) go_repository( name = "com_google_cloud_go_recommendationengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recommendationengine", - sha256 = "33cf95d20d5c036b5595c0f66005d82eb3ddb3ccebdcc69c120a1567b0f12f40", - strip_prefix = "cloud.google.com/go/recommendationengine@v0.7.0", + sha256 = "a2636073ab9bd418361f38332b4e922fcfe5ca6bc10aca96f6fcbdab7a37456d", + strip_prefix = "cloud.google.com/go/recommendationengine@v0.8.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommendationengine/com_google_cloud_go_recommendationengine-v0.8.2.zip", ], ) go_repository( name = "com_google_cloud_go_recommender", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/recommender", - sha256 = "8e9ccaf1167b4a7d3fd682581537f525f712af72c99b586aaea05832b82c86e8", - strip_prefix = "cloud.google.com/go/recommender@v1.9.0", + sha256 = "1f0585da517bd4163b8482c6810cf1c119c0ad5a4d038bdcaa6491a5b3d1417b", + strip_prefix = "cloud.google.com/go/recommender@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/recommender/com_google_cloud_go_recommender-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_redis", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/redis", - sha256 = "51e5063e393d443f9d265b2aad809f45cee8af95a41ab8b532af38711ff451dc", - strip_prefix = "cloud.google.com/go/redis@v1.11.0", + sha256 = "53e5bd33b17517627ce13404b784a7c8b2b8c65719e8f70977616b061834ee87", + strip_prefix = "cloud.google.com/go/redis@v1.13.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.11.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/redis/com_google_cloud_go_redis-v1.13.2.zip", ], ) go_repository( name = "com_google_cloud_go_resourcemanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/resourcemanager", - sha256 = "92bba6de5d69d3928378722537f0b76ec8f958cece23acb9336512f3407eb8e4", - strip_prefix = "cloud.google.com/go/resourcemanager@v1.7.0", + sha256 = "bb51f46e5a6a219191c258f1b395a1129fc96d4ea940eff412191522c0dbd043", + strip_prefix = "cloud.google.com/go/resourcemanager@v1.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcemanager/com_google_cloud_go_resourcemanager-v1.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_resourcesettings", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/resourcesettings", - sha256 = "9ff4470670ebcfa07f7964f85e312e41901afed236c14ecd10952d90e81f99f7", - strip_prefix = "cloud.google.com/go/resourcesettings@v1.5.0", + sha256 = "f2327ef037487c9f183e3a52e6456c087f8dc8325311bc6dcb77e5a8c030e360", + strip_prefix = "cloud.google.com/go/resourcesettings@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/resourcesettings/com_google_cloud_go_resourcesettings-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_retail", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/retail", - sha256 = "5e71739001223ca2cdf7a6fa0ff61673a407ec18503fdd772b96e91ce42b67fc", - strip_prefix = "cloud.google.com/go/retail@v1.12.0", + sha256 = "85c1df965d36c1449655ae20ff44c18eab4177babbc2a851764941073b623862", + strip_prefix = "cloud.google.com/go/retail@v1.14.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.12.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/retail/com_google_cloud_go_retail-v1.14.2.zip", ], ) 
go_repository( name = "com_google_cloud_go_run", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/run", - sha256 = "7828480d028ff1b8496855bbd9dc264e772fae5f7866ceb5e1a7db6f18052edd", - strip_prefix = "cloud.google.com/go/run@v0.9.0", + sha256 = "5382527d044acc067f77f94001d094b1bd990fe91e68dd6de38d86b8eed9bc04", + strip_prefix = "cloud.google.com/go/run@v1.3.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v0.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/run/com_google_cloud_go_run-v1.3.1.zip", ], ) go_repository( name = "com_google_cloud_go_scheduler", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/scheduler", - sha256 = "3e225392a86a45fa9b5144f18bd3ea418f0cd7fab270ab4524a2e897bae54416", - strip_prefix = "cloud.google.com/go/scheduler@v1.9.0", + sha256 = "171f330ba8477da1ab647cc6707f963300fec30750a37e5dd7935d2387c2116d", + strip_prefix = "cloud.google.com/go/scheduler@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/scheduler/com_google_cloud_go_scheduler-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_secretmanager", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/secretmanager", - sha256 = "d24cb4f507e9d531f7d75a4b070bff5f9dc548a2be1591337f4865cd8b084929", - strip_prefix = "cloud.google.com/go/secretmanager@v1.10.0", + sha256 = "4dc8dc1f3dc60a6aa5038ccc7ac988e53af136a544021391a82af303cf811e5a", + strip_prefix = "cloud.google.com/go/secretmanager@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.10.0.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/secretmanager/com_google_cloud_go_secretmanager-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_security", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/security", - sha256 = "e74202ce5419ed745d1c8089a2e4ffb790c0bc045d4f4ab788129ea0f0f5576d", - strip_prefix = "cloud.google.com/go/security@v1.13.0", + sha256 = "8c32c5308500ac6dad8bf4ab42d84c18e5d19bcbfbdb1879a349a6a62d428a61", + strip_prefix = "cloud.google.com/go/security@v1.15.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.13.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/security/com_google_cloud_go_security-v1.15.2.zip", ], ) go_repository( name = "com_google_cloud_go_securitycenter", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/securitycenter", - sha256 = "0f451a28499260a21edf268bb8b657fc55fb81a883ab47fb3d2ca472f8707afd", - strip_prefix = "cloud.google.com/go/securitycenter@v1.19.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.19.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_servicecontrol", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/servicecontrol", - sha256 = "499ce8763d315e0ffdf3705549a507051a27eff9b8dec9debe43bca8d130fabb", - strip_prefix = "cloud.google.com/go/servicecontrol@v1.11.1", + sha256 = "ed0594a9ed6c492d125c67f490e3a94c135e870a98342e9216df12162fa6911e", + strip_prefix = "cloud.google.com/go/securitycenter@v1.23.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicecontrol/com_google_cloud_go_servicecontrol-v1.11.1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/securitycenter/com_google_cloud_go_securitycenter-v1.23.1.zip", ], ) go_repository( name = "com_google_cloud_go_servicedirectory", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/servicedirectory", - sha256 = "4705df69c7e353bfa6a03dad8a50dde5066151b82528946b818df40547c79088", - strip_prefix = "cloud.google.com/go/servicedirectory@v1.9.0", + sha256 = "266651c6851b26c8047bec746633d5773d80463352f8e708da3cf2cd7fdfbe40", + strip_prefix = "cloud.google.com/go/servicedirectory@v1.11.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.9.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_servicemanagement", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/servicemanagement", - sha256 = "2e02a723d1c226c2ecba4e47892b96052efb941be2910fd7afc38197f5bc6083", - strip_prefix = "cloud.google.com/go/servicemanagement@v1.8.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicemanagement/com_google_cloud_go_servicemanagement-v1.8.0.zip", - ], - ) - go_repository( - name = "com_google_cloud_go_serviceusage", - build_file_proto_mode = "disable_global", - importpath = "cloud.google.com/go/serviceusage", - sha256 = "377bad0176bbec558ddb55b1fe10318e2c034c9e87536aba1ba8216b57548f3f", - strip_prefix = "cloud.google.com/go/serviceusage@v1.6.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/serviceusage/com_google_cloud_go_serviceusage-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/servicedirectory/com_google_cloud_go_servicedirectory-v1.11.1.zip", ], ) go_repository( name = "com_google_cloud_go_shell", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/shell", - sha256 = "f88e9c2ff25a5ea22d71a1125cc6e756845ec8221c821092d05e67859966ca48", - strip_prefix = "cloud.google.com/go/shell@v1.6.0", + sha256 = "f50cd0726dd7109c75b9775b6750b3316acb1f764d608ff02278e98cff327ecd", + strip_prefix = "cloud.google.com/go/shell@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/shell/com_google_cloud_go_shell-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_spanner", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/spanner", - sha256 = "e4f3951ea69d07ed383f41579c3a6af8e639558ecfa796421dc6cf3d268118ec", - strip_prefix = "cloud.google.com/go/spanner@v1.45.0", + sha256 = "eb0dd423ec976db7be0e6a709cab5d7ad2b9e20ca53cf9cd9663475bf896531a", + strip_prefix = "cloud.google.com/go/spanner@v1.50.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.45.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/spanner/com_google_cloud_go_spanner-v1.50.0.zip", ], ) go_repository( name = "com_google_cloud_go_speech", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/speech", - sha256 = "27c7d30f3573b4d14a6096588fef65635bf7df8b98e921e934a0af1c7fcf7771", - strip_prefix = "cloud.google.com/go/speech@v1.15.0", + sha256 = "1c184b4698eea3072656dc4e4a0279befdd6d2fa31989f5006b0e46ee7ea5ceb", + strip_prefix = "cloud.google.com/go/speech@v1.19.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.15.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/speech/com_google_cloud_go_speech-v1.19.1.zip", ], ) go_repository( @@ -9082,195 +8978,195 @@ def go_deps(): name = "com_google_cloud_go_storagetransfer", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/storagetransfer", - sha256 = "16e315b990875ac30d149de8b20f75338b178a9a4d34f03a7e181ed5fba7dd33", - strip_prefix = "cloud.google.com/go/storagetransfer@v1.8.0", + sha256 = "e5c5f002aa7c05a4702d1bb4568b0d63a3384e095402517afaae4147fd6169e8", + strip_prefix = "cloud.google.com/go/storagetransfer@v1.10.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/storagetransfer/com_google_cloud_go_storagetransfer-v1.10.1.zip", ], ) go_repository( name = "com_google_cloud_go_talent", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/talent", - sha256 = "e6de9c5d91eb9c336fe36bc6c40c724f75773afe38f8719ec31add3a144328e6", - strip_prefix = "cloud.google.com/go/talent@v1.5.0", + sha256 = "b797a2106e3aca18898ea51144e2308574f49f840fe51fe06f03d1dea56646e1", + strip_prefix = "cloud.google.com/go/talent@v1.6.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/talent/com_google_cloud_go_talent-v1.6.3.zip", ], ) go_repository( name = "com_google_cloud_go_texttospeech", build_file_proto_mode = 
"disable_global", importpath = "cloud.google.com/go/texttospeech", - sha256 = "47fd557bca4ad5f4e8dff734c323a24a03253d19d2fcb693c9f3bd6ad3c15cd3", - strip_prefix = "cloud.google.com/go/texttospeech@v1.6.0", + sha256 = "4a897af8724879bf479f715a57d0894f6fa3b52706e35870c385bcaa799aef2f", + strip_prefix = "cloud.google.com/go/texttospeech@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/texttospeech/com_google_cloud_go_texttospeech-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_tpu", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/tpu", - sha256 = "631fdef221fa6e2374bc43fabd37de734b402e6cc04449d095a6ddc8a1f64303", - strip_prefix = "cloud.google.com/go/tpu@v1.5.0", + sha256 = "48e359c9edd853357bb8f157a4ead1601d9c926b1c539fde86b5531139f60647", + strip_prefix = "cloud.google.com/go/tpu@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/tpu/com_google_cloud_go_tpu-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_trace", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/trace", - sha256 = "8012eaad65d2aa6dca225c708e6b0b43eb91bfc1c7dc82573fe7d993eb2c4384", - strip_prefix = "cloud.google.com/go/trace@v1.9.0", + sha256 = "40dd16a13c80f021b31ee309e80f6ee21323b67b2d3aac8473717ac3b3efce08", + strip_prefix = "cloud.google.com/go/trace@v1.10.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.9.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + 
"http://ats.apps.svc/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/trace/com_google_cloud_go_trace-v1.10.2.zip", ], ) go_repository( name = "com_google_cloud_go_translate", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/translate", - sha256 = "2bbf1bd793abf22ec8b0b200e8b49ea08821b1923ed24ffa668999f7330046fa", - strip_prefix = "cloud.google.com/go/translate@v1.7.0", + sha256 = "1776353be213f41195b9da35ae6f39cff060f9c163a0213711d7cb11e4f067ff", + strip_prefix = "cloud.google.com/go/translate@v1.9.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/translate/com_google_cloud_go_translate-v1.9.1.zip", ], ) go_repository( name = "com_google_cloud_go_video", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/video", - sha256 = "fac96bb5bb2dafb9d19c6b3e70455999c65f2be1f4a0ee86c7772796fcbf660c", - strip_prefix = "cloud.google.com/go/video@v1.15.0", + sha256 = "758131934c35db8aa8d39efa5ce411785da78dd55d82edbb7a9fcb8e9518d2a9", + strip_prefix = "cloud.google.com/go/video@v1.20.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.15.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/video/com_google_cloud_go_video-v1.20.1.zip", ], ) go_repository( name = "com_google_cloud_go_videointelligence", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/videointelligence", - sha256 = "d7a24a20e8f4c0b7dc088010263be03132f63f62dbfa9eb69447c229ef80626b", - strip_prefix = "cloud.google.com/go/videointelligence@v1.10.0", + sha256 = "0ca9d8c68825f07c208157bf24503f4a64aec960efe3ea2ff8c1ce2dac92b351", + strip_prefix = "cloud.google.com/go/videointelligence@v1.11.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - 
"http://ats.apps.svc/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/videointelligence/com_google_cloud_go_videointelligence-v1.11.2.zip", ], ) go_repository( name = "com_google_cloud_go_vision_v2", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vision/v2", - sha256 = "323f1c5e07ea11ee90bec85c0fdccbcf73c26ce28baa832528cf4a9c50d0b4f7", - strip_prefix = "cloud.google.com/go/vision/v2@v2.7.0", + sha256 = "89b8e20f0db42816208b1d9c5a6cb1abd276fee95fbdd563e750da588d110464", + strip_prefix = "cloud.google.com/go/vision/v2@v2.7.3", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vision/v2/com_google_cloud_go_vision_v2-v2.7.3.zip", ], ) go_repository( name = "com_google_cloud_go_vmmigration", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vmmigration", - sha256 = "a289f09b2e6249b493e3ae8bb10225d77590f3823302e46a99ea51b732debb65", - strip_prefix = "cloud.google.com/go/vmmigration@v1.6.0", + sha256 = "8ef0ba7a5fa6b436593782de63111e4274cb61267008bff10c9dc90285405dce", + strip_prefix = "cloud.google.com/go/vmmigration@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmmigration/com_google_cloud_go_vmmigration-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_vmwareengine", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vmwareengine", - sha256 = "f6f5753bf4ee0c4264f78a78966f019fd200bb5bae79fad321093a439b08a2b6", - strip_prefix = "cloud.google.com/go/vmwareengine@v0.3.0", + sha256 = "906ad046857c81df8a0e8f30d09f3db9d2c13021a3374587d3acd2a734c60a13", + strip_prefix = "cloud.google.com/go/vmwareengine@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v0.3.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vmwareengine/com_google_cloud_go_vmwareengine-v1.0.1.zip", ], ) go_repository( name = "com_google_cloud_go_vpcaccess", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/vpcaccess", - sha256 = "8d0662362ec347afedf274930c139afd0c9cdb219646ceb58a07668c5c84278b", - strip_prefix = "cloud.google.com/go/vpcaccess@v1.6.0", + sha256 = "80207274d8a780413505c4efdd881c5798d988ade2bc647ac803d18831a42250", + strip_prefix = "cloud.google.com/go/vpcaccess@v1.7.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.6.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/vpcaccess/com_google_cloud_go_vpcaccess-v1.7.2.zip", ], ) go_repository( name = "com_google_cloud_go_webrisk", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/webrisk", - sha256 = "8cc27cca95d2dd5efc58f335b085da8b46d6520a1963f6b2a33676f2837f3553", - strip_prefix = "cloud.google.com/go/webrisk@v1.8.0", + sha256 = "f1b45df15670274eae77a62a7ae243b5eafb4e10f8f04c852ca73a026b9d03f7", + strip_prefix = "cloud.google.com/go/webrisk@v1.9.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - 
"https://cache.hawkingrei.com/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/webrisk/com_google_cloud_go_webrisk-v1.9.2.zip", ], ) go_repository( name = "com_google_cloud_go_websecurityscanner", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/websecurityscanner", - sha256 = "7f0774556cb41ac4acd16a386a9f8664c7f0ac11ed126d5d771fe07a217ef131", - strip_prefix = "cloud.google.com/go/websecurityscanner@v1.5.0", + sha256 = "ce37d83c1998f0dde1ca5b8e107a8654466271fda7c9b35614672da9d8a33144", + strip_prefix = "cloud.google.com/go/websecurityscanner@v1.6.2", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.5.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/websecurityscanner/com_google_cloud_go_websecurityscanner-v1.6.2.zip", ], ) go_repository( name = "com_google_cloud_go_workflows", build_file_proto_mode = "disable_global", importpath = "cloud.google.com/go/workflows", - sha256 = "e6e83869c5fbcccd3ee489128a300b75cb02a99b48b59bbb829b2e7d7ab81f9c", - strip_prefix = "cloud.google.com/go/workflows@v1.10.0", + sha256 = "b8de38a09b26fc4a98a10a8370f4780361c27a13cc84244fcf8840c2ca0f402a", + strip_prefix = "cloud.google.com/go/workflows@v1.12.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "http://ats.apps.svc/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "https://cache.hawkingrei.com/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.10.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "http://ats.apps.svc/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "https://cache.hawkingrei.com/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/cloud.google.com/go/workflows/com_google_cloud_go_workflows-v1.12.1.zip", ], ) 
go_repository( @@ -9602,13 +9498,13 @@ def go_deps(): name = "io_etcd_go_bbolt", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/bbolt", - sha256 = "a357fccd93e865dce3d3859ed857ce827f7a2f2dc5b90cfaa95202f5d76e4ac2", - strip_prefix = "go.etcd.io/bbolt@v1.3.6", + sha256 = "18babae67eccdd2982ad0bd44bb77a238e8b6c8da192b5ae6bd3c0dd48d5ba31", + strip_prefix = "go.etcd.io/bbolt@v1.3.8", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "http://ats.apps.svc/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.6.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "http://ats.apps.svc/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/bbolt/io_etcd_go_bbolt-v1.3.8.zip", ], ) go_repository( @@ -9619,117 +9515,130 @@ def go_deps(): patches = [ "//build/patches:io_etcd_go_etcd_api_v3.patch", ], - sha256 = "bfd9ce626389c8a11c2d33eb3c823cc277898c51254a6e02ed967f948aec79f6", - strip_prefix = "go.etcd.io/etcd/api/v3@v3.5.2", + sha256 = "d05d41beae43dc75f0a6e7815a447c0e704cfdf94841e96ba661e0b1dbc4a10c", + strip_prefix = "go.etcd.io/etcd/api/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/api/v3/io_etcd_go_etcd_api_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_pkg_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/pkg/v3", - sha256 = "b183c377b46eb622d80d77b14755acbdbba43b9b5882ed2a5e9975985eaacd25", - strip_prefix = "go.etcd.io/etcd/client/pkg/v3@v3.5.2", + sha256 = "d99c95cd67a6c27868368cb7d31f60f11894d9039bea0c81b9ab66540f01d524", + strip_prefix = "go.etcd.io/etcd/client/pkg/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", + 
"https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/pkg/v3/io_etcd_go_etcd_client_pkg_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_v2", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/v2", - sha256 = "25e0a2e179114cdc122e57dcee974cff927cbe2f04304d71575fe0dbf66d506b", - strip_prefix = "go.etcd.io/etcd/client/v2@v2.305.2", + sha256 = "cb78469abc82a73bf8116cae2d772791065a28662f19771fb3504804896f9cc3", + strip_prefix = "go.etcd.io/etcd/client/v2@v2.305.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v2/io_etcd_go_etcd_client_v2-v2.305.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_client_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/client/v3", - sha256 = "06aae6f25789a7dea98a2f7df67a4d65b660b81a8accd88ddced9ca8c335d99d", - strip_prefix = "go.etcd.io/etcd/client/v3@v3.5.2", + sha256 = "f35f571c1c46260bfed9222de88d7e87b1cd5de625465b4489f97af1b3a2c881", + strip_prefix = "go.etcd.io/etcd/client/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/client/v3/io_etcd_go_etcd_client_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_etcdutl_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/etcdutl/v3", - sha256 = "9d694d9b204037b05d13c6897a3b81a8234cc444e9b9892846a79a3ade72aeab", - strip_prefix = "go.etcd.io/etcd/etcdutl/v3@v3.5.2", + sha256 = "fbd00834b99644e90ec3f1594bb9901ef2befc2e0b2e957be9605d7e12ca6743", + strip_prefix = "go.etcd.io/etcd/etcdutl/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.2.zip", + 
"http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/etcdutl/v3/io_etcd_go_etcd_etcdutl_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_pkg_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/pkg/v3", - sha256 = "a1d96686d541509919732896d79e885e40147b5eeb8315db58dc07ad8c191226", - strip_prefix = "go.etcd.io/etcd/pkg/v3@v3.5.2", + sha256 = "8b7c52c59bd9e6b80df28816410846ec61b4318a551c55d9c8fa58b40c0da6f5", + strip_prefix = "go.etcd.io/etcd/pkg/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/pkg/v3/io_etcd_go_etcd_pkg_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_raft_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/raft/v3", - sha256 = "2b1fdd35d496af817cfe06ff74949e3cc77efac3473f817f998569107162d41a", - strip_prefix = "go.etcd.io/etcd/raft/v3@v3.5.2", + sha256 = "2ca38be08a7beb77633685d31e18631c0f57b403e41455f524e60a7f2549201f", + strip_prefix = "go.etcd.io/etcd/raft/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/raft/v3/io_etcd_go_etcd_raft_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_server_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/server/v3", - sha256 = "7eac7dcb18c57f880830d363ab250f9b387c0cbed3e4910427b8e23b7d8e28d3", - strip_prefix = "go.etcd.io/etcd/server/v3@v3.5.2", + sha256 = "a1112d8570540017f35d9ed372ff1dde75c59ee1fd7f20074e377ffc90ebd103", + strip_prefix = "go.etcd.io/etcd/server/v3@v3.5.10", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - 
"https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/server/v3/io_etcd_go_etcd_server_v3-v3.5.10.zip", ], ) go_repository( name = "io_etcd_go_etcd_tests_v3", build_file_proto_mode = "disable_global", importpath = "go.etcd.io/etcd/tests/v3", - sha256 = "fc00d13163948f7633e1f53f08d05ee4e75930d02114754384a736f733d35148", - strip_prefix = "go.etcd.io/etcd/tests/v3@v3.5.2", + sha256 = "23bc94e86126c8909155ee770d0b9e42f6260fdc97a8f8355f365e9995c4c559", + strip_prefix = "go.etcd.io/etcd/tests/v3@v3.5.10", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + "http://ats.apps.svc/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.10.zip", + ], + ) + go_repository( + name = "io_etcd_go_gofail", + build_file_proto_mode = "disable_global", + importpath = "go.etcd.io/gofail", + sha256 = "4fd6977dd736aba56be58c0b16e96d73433688976a5b352578d3c54d0db9e803", + strip_prefix = "go.etcd.io/gofail@v0.1.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "http://ats.apps.svc/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "https://cache.hawkingrei.com/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/etcd/tests/v3/io_etcd_go_etcd_tests_v3-v3.5.2.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "http://ats.apps.svc/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "https://cache.hawkingrei.com/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.etcd.io/gofail/io_etcd_go_gofail-v0.1.0.zip", ], ) go_repository( @@ -9888,147 +9797,95 @@ def go_deps(): "https://storage.googleapis.com/pingcapmirror/gomod/contrib.go.opencensus.io/exporter/ocagent/io_opencensus_go_contrib_exporter_ocagent-v0.4.12.zip", ], ) - go_repository( - name = "io_opentelemetry_go_contrib", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/contrib", - sha256 = "b33252dafaa7884e1925ca052bfc32275bd69f7faa1a294ce2dbf05b7f62fda1", - strip_prefix = "go.opentelemetry.io/contrib@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/io_opentelemetry_go_contrib-v0.20.0.zip", - ], - ) go_repository( name = 
"io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc", - sha256 = "5d75e50405735d05540a3cc59c3741cc43275ba9203bcc77ac85214ebd5212f8", - strip_prefix = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.20.0", + sha256 = "f7abd5e3fe79b49a398912f67f79d853e329bb4f653b98804a961d5178dadc5e", + strip_prefix = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc@v0.25.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/io_opentelemetry_go_contrib_instrumentation_google_golang_org_grpc_otelgrpc-v0.25.0.zip", ], ) go_repository( name = "io_opentelemetry_go_otel", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel", - sha256 = "8e55c823cde41ae4920f331e3b3999adca4c8729f0f096950454c996520972a3", - strip_prefix = "go.opentelemetry.io/otel@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_exporters_otlp", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/exporters/otlp", - sha256 = "abd40ffff96f3caa01ee6854b52e69e6787b10d31a6c2023447d5106496c9b2e", - strip_prefix = "go.opentelemetry.io/otel/exporters/otlp@v0.20.0", + sha256 = "e8c4d785d6a230d5c954d7afbbb0df5c8a2ffb59aeb07bc4f7c731c6f55e0626", + strip_prefix = "go.opentelemetry.io/otel@v1.0.1", urls = [ - 
"http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/io_opentelemetry_go_otel_exporters_otlp-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/io_opentelemetry_go_otel-v1.0.1.zip", ], ) go_repository( - name = "io_opentelemetry_go_otel_metric", + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace", build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/metric", - sha256 = "d7ae3abbdcf9ea48ff23a477f324cb3595c77f3eb83f6acde5c0c9300e23fedb", - strip_prefix = "go.opentelemetry.io/otel/metric@v0.20.0", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace", + sha256 = "c0b373451618d70053fcfad5acbdc243cbad8b6b9252e0a30303171b0b065499", + strip_prefix = "go.opentelemetry.io/otel/exporters/otlp/otlptrace@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/metric/io_opentelemetry_go_otel_metric-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/io_opentelemetry_go_otel_exporters_otlp_otlptrace-v1.0.1.zip", ], ) go_repository( - name = "io_opentelemetry_go_otel_oteltest", + name = "io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc", build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/oteltest", - sha256 = "5773e674e2f095c2348d13133d2c5ed3019c3c4dc43c47dcae788a673f197d20", - strip_prefix = "go.opentelemetry.io/otel/oteltest@v0.20.0", + importpath = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc", + sha256 = "7e99951c02fdc104a08bff9244de6f9129171ccde70761c61c9f4255ce81dc5d", + strip_prefix = "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - 
"https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/oteltest/io_opentelemetry_go_otel_oteltest-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_otel_sdk", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel/sdk", - sha256 = "13c01e92ebcbde0b3d2efc4d3a4445c2cce8d505c823aeffff6398a7dabb3806", - strip_prefix = "go.opentelemetry.io/otel/sdk@v0.20.0", + sha256 = "760e9297c941b22cd3a5a2b217de46f8f2411cc7ef8dc1bab8ed02d75e10217d", + strip_prefix = "go.opentelemetry.io/otel/sdk@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_sdk_export_metric", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/sdk/export/metric", - sha256 = "e0037e543d27111d06904f8a2060b41fb40e960ddce5cec5e6f190490ae52f57", - strip_prefix = "go.opentelemetry.io/otel/sdk/export/metric@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/export/metric/io_opentelemetry_go_otel_sdk_export_metric-v0.20.0.zip", - ], - ) - go_repository( - name = "io_opentelemetry_go_otel_sdk_metric", - build_file_proto_mode = "disable_global", - importpath = "go.opentelemetry.io/otel/sdk/metric", - sha256 = "b0d5ffded967229eeee79bb9fb50320c68af812d5f2e6dcb9e44ddb7bd2afe16", - strip_prefix = "go.opentelemetry.io/otel/sdk/metric@v0.20.0", - urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", - 
"https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/metric/io_opentelemetry_go_otel_sdk_metric-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/sdk/io_opentelemetry_go_otel_sdk-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_otel_trace", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/otel/trace", - sha256 = "fd6a9646a66f0fa98fc2b12eed1abe11220e5e6cc0cb4b8d9c5905631c87608d", - strip_prefix = "go.opentelemetry.io/otel/trace@v0.20.0", + sha256 = "d7f303c3e1a9d844121309c132fab5f99dc68e9ac6518ef1d9c44f92ae9e97ea", + strip_prefix = "go.opentelemetry.io/otel/trace@v1.0.1", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v0.20.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/otel/trace/io_opentelemetry_go_otel_trace-v1.0.1.zip", ], ) go_repository( name = "io_opentelemetry_go_proto_otlp", build_file_proto_mode = "disable_global", importpath = "go.opentelemetry.io/proto/otlp", - sha256 = "a7db0590bc4c5f0b9b99cc958decf644f1e5cc11e0b995dc20b3583a2215259b", - strip_prefix = "go.opentelemetry.io/proto/otlp@v0.7.0", + sha256 = "1a91376c923da07bee23439e8430c32736f6330532df85d3bd1ada90305097d7", + strip_prefix = "go.opentelemetry.io/proto/otlp@v0.9.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "http://ats.apps.svc/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.7.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "http://ats.apps.svc/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "https://cache.hawkingrei.com/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.opentelemetry.io/proto/otlp/io_opentelemetry_go_proto_otlp-v0.9.0.zip", ], ) go_repository( @@ -10100,13 +9957,13 @@ def go_deps(): name = "org_golang_google_api", build_file_proto_mode = "disable_global", importpath = "google.golang.org/api", - sha256 = 
"42c62aaba1d76efede08c70d8aef7889c5c8ee9c9c4f1e7c455b07838cabb785", - strip_prefix = "google.golang.org/api@v0.114.0", + sha256 = "1c7547012d828329aa90dc77bfa7d826184b14229cc72c93eeca50cb9882158d", + strip_prefix = "google.golang.org/api@v0.128.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "http://ats.apps.svc/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/api/org_golang_google_api-v0.114.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "http://ats.apps.svc/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/api/org_golang_google_api-v0.128.0.zip", ], ) go_repository( @@ -10126,26 +9983,65 @@ def go_deps(): name = "org_golang_google_genproto", build_file_proto_mode = "disable_global", importpath = "google.golang.org/genproto", - sha256 = "28f0317e6948788a33c07698109005675062f0203ed06bc866350a575bc974bf", - strip_prefix = "google.golang.org/genproto@v0.0.0-20230410155749-daa745c078e1", + sha256 = "11c4f5d4c24c25c9dec4fb2d4e723dead4c558bea81ab3b2deb3b1f5f98f278a", + strip_prefix = "google.golang.org/genproto@v0.0.0-20231016165738-49dd2c1f3d0b", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_api", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/api", + sha256 = "26f62026bcd267b4fbde3bd16aa9334568af09e623fd64a8e7cd8ec99c0dbf5d", + strip_prefix = "google.golang.org/genproto/googleapis/api@v0.0.0-20231016165738-49dd2c1f3d0b", + urls = [ + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/api/org_golang_google_genproto_googleapis_api-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_bytestream", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/bytestream", + sha256 = "cab1c8c198b4c5a226590e8b5b1b847e505a7aaf10a0145ce8c29951eca86b6a", + strip_prefix = "google.golang.org/genproto/googleapis/bytestream@v0.0.0-20230530153820-e85fd2cbaebc", + urls = [ + 
"http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/bytestream/org_golang_google_genproto_googleapis_bytestream-v0.0.0-20230530153820-e85fd2cbaebc.zip", + ], + ) + go_repository( + name = "org_golang_google_genproto_googleapis_rpc", + build_file_proto_mode = "disable_global", + importpath = "google.golang.org/genproto/googleapis/rpc", + sha256 = "b35528074783811faaaec1d36c8f42d88b30878e509c5f407c53cb83ec02af78", + strip_prefix = "google.golang.org/genproto/googleapis/rpc@v0.0.0-20231016165738-49dd2c1f3d0b", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "http://ats.apps.svc/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/org_golang_google_genproto-v0.0.0-20230410155749-daa745c078e1.zip", + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "http://ats.apps.svc/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/genproto/googleapis/rpc/org_golang_google_genproto_googleapis_rpc-v0.0.0-20231016165738-49dd2c1f3d0b.zip", ], ) go_repository( name = "org_golang_google_grpc", build_file_proto_mode = "disable_global", importpath = "google.golang.org/grpc", - sha256 = "8e279a7a36347098a00debb5f76ef75b981939c282cd7771cc22b9b576065d84", - strip_prefix = "google.golang.org/grpc@v1.54.0", + sha256 = "8d8be58b73bcbefd731939880edd32aa3a90c4b6937ce07d904075470fce3565", + strip_prefix = "google.golang.org/grpc@v1.59.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "http://ats.apps.svc/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "https://cache.hawkingrei.com/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.54.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "http://ats.apps.svc/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "https://cache.hawkingrei.com/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/google.golang.org/grpc/org_golang_google_grpc-v1.59.0.zip", ], ) go_repository( @@ -10217,13 
+10113,13 @@ def go_deps(): name = "org_golang_x_lint", build_file_proto_mode = "disable_global", importpath = "golang.org/x/lint", - sha256 = "0a4a5ebd2b1d79e7f480cbf5a54b45a257ae1ec9d11f01688efc5c35268d4603", - strip_prefix = "golang.org/x/lint@v0.0.0-20210508222113-6edffad5e616", + sha256 = "4620205ccd1fd5c5ced7ccbc264217f407c53924e847f4219e48c04c7480b294", + strip_prefix = "golang.org/x/lint@v0.0.0-20201208152925-83fdc39ff7b5", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "http://ats.apps.svc/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "https://cache.hawkingrei.com/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20210508222113-6edffad5e616.zip", + "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "http://ats.apps.svc/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "https://cache.hawkingrei.com/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/lint/org_golang_x_lint-v0.0.0-20201208152925-83fdc39ff7b5.zip", ], ) go_repository( @@ -10269,13 +10165,13 @@ def go_deps(): name = "org_golang_x_oauth2", build_file_proto_mode = "disable_global", importpath = "golang.org/x/oauth2", - sha256 = "774ad761b3732b86eaa3d70c30bcaed6dd09e96eec3cdeb2c0a9c112ce168704", - strip_prefix = "golang.org/x/oauth2@v0.8.0", + sha256 = "06f9bc67776baba78ae443744f846c193e68d775b3339b630788cca03882dda7", + strip_prefix = "golang.org/x/oauth2@v0.11.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - "http://ats.apps.svc/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - "https://cache.hawkingrei.com/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.8.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "http://ats.apps.svc/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "https://cache.hawkingrei.com/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/golang.org/x/oauth2/org_golang_x_oauth2-v0.11.0.zip", ], ) go_repository( @@ -10529,13 +10425,13 @@ def go_deps(): name = "org_uber_go_mock", build_file_proto_mode = "disable_global", importpath = "go.uber.org/mock", - sha256 = "df840a589119d0c1966e3f8888fb6b6a05b4aa793b1074c3fd4c4a508e0b0e3a", - strip_prefix = "go.uber.org/mock@v0.2.0", + sha256 = "3520cddd6a3fc4d72a5cedb293508cf68ae2fcb0147f038fed8c0d6fd526880c", + strip_prefix = "go.uber.org/mock@v0.3.0", urls = [ - "http://bazel-cache.pingcap.net:8080/gomod/go.uber.org/mock/org_uber_go_mock-v0.2.0.zip", - "http://ats.apps.svc/gomod/go.uber.org/mock/org_uber_go_mock-v0.2.0.zip", - "https://cache.hawkingrei.com/gomod/go.uber.org/mock/org_uber_go_mock-v0.2.0.zip", - "https://storage.googleapis.com/pingcapmirror/gomod/go.uber.org/mock/org_uber_go_mock-v0.2.0.zip", + "http://bazel-cache.pingcap.net:8080/gomod/go.uber.org/mock/org_uber_go_mock-v0.3.0.zip", + "http://ats.apps.svc/gomod/go.uber.org/mock/org_uber_go_mock-v0.3.0.zip", + 
"https://cache.hawkingrei.com/gomod/go.uber.org/mock/org_uber_go_mock-v0.3.0.zip", + "https://storage.googleapis.com/pingcapmirror/gomod/go.uber.org/mock/org_uber_go_mock-v0.3.0.zip", ], ) go_repository( diff --git a/Makefile b/Makefile index 6aeb28e8777de..84c60380f0376 100644 --- a/Makefile +++ b/Makefile @@ -191,9 +191,9 @@ else endif enterprise-server: - @make init-submodule - @make enterprise-prepare - @make enterprise-server-build + $(MAKE) init-submodule + $(MAKE) enterprise-prepare + $(MAKE) enterprise-server-build server_check: ifeq ($(TARGET), "") @@ -568,6 +568,30 @@ bazel_addindextest: failpoint-enable bazel_ci_simple_prepare -- //tests/realtikvtest/addindextest/... ./build/jenkins_collect_coverage.sh +bazel_addindextest1: failpoint-enable bazel_ci_simple_prepare + bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) --test_arg=-with-real-tikv --define gotags=deadlock,intest \ + --@io_bazel_rules_go//go/config:cover_format=go_cover \ + -- //tests/realtikvtest/addindextest1/... + ./build/jenkins_collect_coverage.sh + +bazel_addindextest2: failpoint-enable bazel_ci_simple_prepare + bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) --test_arg=-with-real-tikv --define gotags=deadlock,intest \ + --@io_bazel_rules_go//go/config:cover_format=go_cover \ + -- //tests/realtikvtest/addindextest2/... + ./build/jenkins_collect_coverage.sh + +bazel_addindextest3: failpoint-enable bazel_ci_simple_prepare + bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) --test_arg=-with-real-tikv --define gotags=deadlock,intest \ + --@io_bazel_rules_go//go/config:cover_format=go_cover \ + -- //tests/realtikvtest/addindextest3/... + ./build/jenkins_collect_coverage.sh + +bazel_addindextest4: failpoint-enable bazel_ci_simple_prepare + bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) --test_arg=-with-real-tikv --define gotags=deadlock,intest \ + --@io_bazel_rules_go//go/config:cover_format=go_cover \ + -- //tests/realtikvtest/addindextest4/... + ./build/jenkins_collect_coverage.sh + # on timeout, bazel won't print log sometimes, so we use --test_output=all to print log always bazel_importintotest: failpoint-enable bazel_ci_simple_prepare bazel $(BAZEL_GLOBAL_CONFIG) coverage $(BAZEL_CMD_CONFIG) --test_output=all --test_arg=-with-real-tikv --define gotags=deadlock,intest \ diff --git a/OWNERS b/OWNERS index 02f9d485898d8..f1fa925b86f65 100644 --- a/OWNERS +++ b/OWNERS @@ -123,6 +123,7 @@ reviewers: - fixdb - fzzf678 - iamxy + - jiyfhust - JmPotato - js00070 - lamxTyler diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 1aa64cefdb9e4..dce8bff5562d3 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -26,6 +26,10 @@ aliases: - windtalker - XuHuaiyu - zanmato1984 + sig-approvers-executor-import: # approvers for importer part of executor pkg. 
+ - D3Hunter + - gmhdbjd + - lance6716 sig-approvers-expression: # approvers for expression pkg - windtalker - XuHuaiyu @@ -44,3 +48,70 @@ aliases: - lance6716 - tangenta - wjhuang2016 + sig-approvers-ddl: # approvers for ddl pkg + - Benjamin2037 + - tangenta + - wjhuang2016 + - ywqzzy + - zimulala + sig-approvers-disttask: # approvers for disttask pkg + - Benjamin2037 + - D3Hunter + - gmhdbjd + - lance6716 + - tangenta + - wjhuang2016 + - ywqzzy + sig-approvers-dumpling: # approvers for dumpling module + - Benjamin2037 + - gmhdbjd + - lichunzhu + - okJiang + sig-approvers-infoschema: # approvers for infoschema pkg + - zimulala + - wjhuang2016 + - tangenta + - ywqzzy + - D3Hunter + - Benjamin2037 + - gmhdbjd + sig-approvers-meta: # approvers for meta pkg + - Benjamin2037 + - gmhdbjd + - tangenta + - wjhuang2016 + - ywqzzy + - zimulala + sig-approvers-owner: # approvers for `owner` pkg + - Benjamin2037 + - lichunzhu + - tangenta + - wjhuang2016 + - ywqzzy + - zimulala + sig-approvers-resourcemanager: # approvers for resourcemanager pkg + - Benjamin2037 + - D3Hunter + - gmhdbjd + - lance6716 + - tangenta + - wjhuang2016 + - ywqzzy + sig-approvers-table: # approvers for table packages. + - Benjamin2037 + - cfzjywxk + - gmhdbjd + - tangenta + - wjhuang2016 + - ywqzzy + - zimulala + sig-approvers-lock: # approvers for lock pkg + - Benjamin2037 + - lance6716 + - tangenta + - wjhuang2016 + - zimulala + sig-approvers-tidb-binlog: # approvers for tidb-binlog module + - Benjamin2037 + - gmhdbjd + - lichunzhu diff --git a/br/OWNERS b/br/OWNERS index 1de5f79e4543a..6e9a6f3f3aa22 100644 --- a/br/OWNERS +++ b/br/OWNERS @@ -2,7 +2,7 @@ options: no_parent_owners: true filters: - "(tidb-lightning\\.tom)$": + "(tidb-lightning\\.toml)$": approvers: - sig-critical-approvers-tidb-lightning ".*": diff --git a/br/pkg/aws/ebs.go b/br/pkg/aws/ebs.go index ddea6b358f556..cf5425e03be0d 100644 --- a/br/pkg/aws/ebs.go +++ b/br/pkg/aws/ebs.go @@ -281,6 +281,152 @@ func (e *EC2Session) DeleteSnapshots(snapIDMap map[string]string) { log.Info("delete snapshot end", zap.Int("need-to-del", len(snapIDMap)), zap.Int32("deleted", deletedCnt.Load())) } +// EnableDataFSR enables FSR for data volume snapshots +func (e *EC2Session) EnableDataFSR(meta *config.EBSBasedBRMeta, targetAZ string) (map[string][]*string, error) { + snapshotsIDsMap := fetchTargetSnapshots(meta, targetAZ) + + if len(snapshotsIDsMap) == 0 { + return snapshotsIDsMap, errors.Errorf("empty backup meta") + } + + eg, _ := errgroup.WithContext(context.Background()) + + for availableZone := range snapshotsIDsMap { + targetAZ := availableZone + eg.Go(func() error { + log.Info("enable fsr for snapshots", zap.String("available zone", targetAZ)) + resp, err := e.ec2.EnableFastSnapshotRestores(&ec2.EnableFastSnapshotRestoresInput{ + AvailabilityZones: []*string{&targetAZ}, + SourceSnapshotIds: snapshotsIDsMap[targetAZ], + }) + + if err != nil { + return errors.Trace(err) + } + + if len(resp.Unsuccessful) > 0 { + log.Warn("not all snapshots enabled FSR") + return errors.Errorf("Some snapshot fails to enable FSR for available zone %s, such as %s, error code is %v", targetAZ, *resp.Unsuccessful[0].SnapshotId, resp.Unsuccessful[0].FastSnapshotRestoreStateErrors) + } + + return e.waitDataFSREnabled(snapshotsIDsMap[targetAZ], targetAZ) + }) + } + return snapshotsIDsMap, eg.Wait() +} + +// waitDataFSREnabled waits FSR for data volume snapshots are all enabled +func (e *EC2Session) waitDataFSREnabled(snapShotIDs []*string, targetAZ string) error { + // Create a map to store the 
strings as keys + pendingSnapshots := make(map[string]struct{}) + + // Populate the map with the strings from the array + for _, str := range snapShotIDs { + pendingSnapshots[*str] = struct{}{} + } + + log.Info("starts check fsr pending snapshots", zap.Any("snapshots", pendingSnapshots), zap.String("available zone", targetAZ)) + for { + if len(pendingSnapshots) == 0 { + log.Info("all snapshots fsr enablement is finished", zap.String("available zone", targetAZ)) + return nil + } + + // check pending snapshots every 1 minute + time.Sleep(1 * time.Minute) + log.Info("check snapshots not fsr enabled", zap.Int("count", len(pendingSnapshots))) + input := &ec2.DescribeFastSnapshotRestoresInput{ + Filters: []*ec2.Filter{ + { + Name: aws.String("state"), + Values: []*string{aws.String("disabled"), aws.String("disabling"), aws.String("enabling"), aws.String("optimizing")}, + }, + { + Name: aws.String("availability-zone"), + Values: []*string{aws.String(targetAZ)}, + }, + }, + } + + result, err := e.ec2.DescribeFastSnapshotRestores(input) + if err != nil { + return errors.Trace(err) + } + + uncompletedSnapshots := make(map[string]struct{}) + for _, fastRestore := range result.FastSnapshotRestores { + _, found := pendingSnapshots[*fastRestore.SnapshotId] + if found { + // Detect some conflict states + if strings.EqualFold(*fastRestore.State, "disabled") || strings.EqualFold(*fastRestore.State, "disabling") { + log.Error("detect conflict status", zap.String("snapshot", *fastRestore.SnapshotId), zap.String("status", *fastRestore.State)) + return errors.Errorf("status of snapshot %s is %s ", *fastRestore.SnapshotId, *fastRestore.State) + } + uncompletedSnapshots[*fastRestore.SnapshotId] = struct{}{} + } + } + pendingSnapshots = uncompletedSnapshots + } +} + +// DisableDataFSR disables FSR for data volume snapshots +func (e *EC2Session) DisableDataFSR(snapshotsIDsMap map[string][]*string) error { + if len(snapshotsIDsMap) == 0 { + return nil + } + + eg, _ := errgroup.WithContext(context.Background()) + + for availableZone := range snapshotsIDsMap { + targetAZ := availableZone + eg.Go(func() error { + resp, err := e.ec2.DisableFastSnapshotRestores(&ec2.DisableFastSnapshotRestoresInput{ + AvailabilityZones: []*string{&targetAZ}, + SourceSnapshotIds: snapshotsIDsMap[targetAZ], + }) + + if err != nil { + return errors.Trace(err) + } + + if len(resp.Unsuccessful) > 0 { + log.Warn("not all snapshots disabled FSR", zap.String("available zone", targetAZ)) + return errors.Errorf("Some snapshot fails to disable FSR for available zone %s, such as %s, error code is %v", targetAZ, *resp.Unsuccessful[0].SnapshotId, resp.Unsuccessful[0].FastSnapshotRestoreStateErrors) + } + + log.Info("Disable FSR issued", zap.String("available zone", targetAZ)) + + return nil + }) + } + return eg.Wait() +} + +func fetchTargetSnapshots(meta *config.EBSBasedBRMeta, specifiedAZ string) map[string][]*string { + var sourceSnapshotIDs = make(map[string][]*string) + + if len(meta.TiKVComponent.Stores) == 0 { + return sourceSnapshotIDs + } + + for i := range meta.TiKVComponent.Stores { + store := meta.TiKVComponent.Stores[i] + for j := range store.Volumes { + oldVol := store.Volumes[j] + // Handle data volume snapshots only + if strings.Compare(oldVol.Type, "storage.data-dir") == 0 { + if specifiedAZ != "" { + sourceSnapshotIDs[specifiedAZ] = append(sourceSnapshotIDs[specifiedAZ], &oldVol.SnapshotID) + } else { + sourceSnapshotIDs[oldVol.VolumeAZ] = append(sourceSnapshotIDs[oldVol.VolumeAZ], &oldVol.SnapshotID) + } + } + } + } + + return 
sourceSnapshotIDs +} + // CreateVolumes create volumes from snapshots // if err happens in the middle, return half-done result // returned map: store id -> old volume id -> new volume id @@ -377,7 +523,7 @@ func (e *EC2Session) WaitVolumesCreated(volumeIDMap map[string]string, progress for len(pendingVolumes) > 0 { // check every 5 seconds time.Sleep(5 * time.Second) - log.Info("check pending snapshots", zap.Int("count", len(pendingVolumes))) + log.Info("check pending volumes", zap.Int("count", len(pendingVolumes))) resp, err := e.ec2.DescribeVolumes(&ec2.DescribeVolumesInput{ VolumeIds: pendingVolumes, }) diff --git a/br/pkg/config/ebs.go b/br/pkg/config/ebs.go index deedb2d384403..5731738c14d2a 100644 --- a/br/pkg/config/ebs.go +++ b/br/pkg/config/ebs.go @@ -100,6 +100,14 @@ func (c *EBSBasedBRMeta) GetStoreCount() uint64 { return uint64(len(c.TiKVComponent.Stores)) } +func (c *EBSBasedBRMeta) GetTiKVVolumeCount() uint64 { + if c.TiKVComponent == nil || len(c.TiKVComponent.Stores) == 0 { + return 0 + } + // Assume TiKV nodes are symmetric + return uint64(len(c.TiKVComponent.Stores[0].Volumes)) +} + func (c *EBSBasedBRMeta) String() string { cfg, err := json.Marshal(c) if err != nil { diff --git a/br/pkg/lightning/backend/backend.go b/br/pkg/lightning/backend/backend.go index f1ebb9484843c..026dda56e8744 100644 --- a/br/pkg/lightning/backend/backend.go +++ b/br/pkg/lightning/backend/backend.go @@ -106,14 +106,15 @@ type ExternalEngineConfig struct { StorageURI string DataFiles []string StatFiles []string - MinKey []byte - MaxKey []byte + StartKey []byte + EndKey []byte SplitKeys [][]byte RegionSplitSize int64 // TotalFileSize can be an estimated value. TotalFileSize int64 // TotalKVCount can be an estimated value. TotalKVCount int64 + CheckHotspot bool } // CheckCtx contains all parameters used in CheckRequirements diff --git a/br/pkg/lightning/backend/external/BUILD.bazel b/br/pkg/lightning/backend/external/BUILD.bazel index 34e04ec24137d..2b017198725a6 100644 --- a/br/pkg/lightning/backend/external/BUILD.bazel +++ b/br/pkg/lightning/backend/external/BUILD.bazel @@ -9,6 +9,7 @@ go_library( "engine.go", "file.go", "iter.go", + "kv_buf.go", "kv_reader.go", "merge.go", "split.go", @@ -32,14 +33,17 @@ go_library( "//pkg/sessionctx/variable", "//pkg/util/hack", "//pkg/util/logutil", + "//pkg/util/memory", "//pkg/util/size", "@com_github_cockroachdb_pebble//:pebble", "@com_github_docker_go_units//:go-units", + "@com_github_google_uuid//:uuid", "@com_github_jfcg_sorty_v2//:sorty", "@com_github_pingcap_errors//:errors", "@org_golang_x_sync//errgroup", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", + "@org_uber_go_zap//zapcore", ], ) @@ -53,19 +57,22 @@ go_test( "engine_test.go", "file_test.go", "iter_test.go", + "kv_buf_test.go", "split_test.go", "util_test.go", "writer_test.go", ], embed = [":external"], flaky = True, - shard_count = 40, + shard_count = 43, deps = [ "//br/pkg/lightning/backend/kv", "//br/pkg/lightning/common", + "//br/pkg/membuf", "//br/pkg/storage", "//pkg/kv", "//pkg/util/codec", + "//pkg/util/logutil", "//pkg/util/size", "@com_github_aws_aws_sdk_go//aws", "@com_github_aws_aws_sdk_go//aws/credentials", diff --git a/br/pkg/lightning/backend/external/byte_reader.go b/br/pkg/lightning/backend/external/byte_reader.go index 86ffe9536f542..bed2661f50764 100644 --- a/br/pkg/lightning/backend/external/byte_reader.go +++ b/br/pkg/lightning/backend/external/byte_reader.go @@ -278,6 +278,8 @@ func (r *byteReader) reload() error { if err != nil { switch err { case io.EOF: + // 
move curBufOffset so following read will also find EOF + r.curBufOffset = len(r.curBuf) return err case io.ErrUnexpectedEOF: // The last batch. diff --git a/br/pkg/lightning/backend/external/engine.go b/br/pkg/lightning/backend/external/engine.go index 55a8c41371ee3..9c11a89c658a7 100644 --- a/br/pkg/lightning/backend/external/engine.go +++ b/br/pkg/lightning/backend/external/engine.go @@ -37,21 +37,39 @@ import ( "golang.org/x/sync/errgroup" ) +// during test on ks3, we found that we can open about 8000 connections to ks3, +// bigger than that, we might receive "connection reset by peer" error, and +// the read speed will be very slow, still investigating the reason. +// Also open too many connections will take many memory in kernel, and the +// test is based on k8s pod, not sure how it will behave on EC2. +// but, ks3 supporter says there's no such limit on connections. +// And our target for global sort is AWS s3, this default value might not fit well. +// TODO: adjust it according to cloud storage. +const maxCloudStorageConnections = 8000 + // Engine stored sorted key/value pairs in an external storage. type Engine struct { storage storage.ExternalStorage dataFiles []string statsFiles []string - minKey []byte - maxKey []byte + startKey []byte + endKey []byte splitKeys [][]byte regionSplitSize int64 bufPool *membuf.Pool + // checkHotspot is true means we will check hotspot file when using MergeKVIter. + // if hotspot file is detected, we will use multiple readers to read data. + // if it's false, MergeKVIter will read each file using 1 reader. + // this flag also affects the strategy of loading data, either: + // less load routine + check and read hotspot file concurrently (add-index uses this one) + // more load routine + read each file using 1 reader (import-into uses this one) + checkHotspot bool keyAdapter common.KeyAdapter duplicateDetection bool duplicateDB *pebble.DB dupDetectOpt common.DupDetectOpt + workerConcurrency int ts uint64 totalKVSize int64 @@ -66,31 +84,35 @@ func NewExternalEngine( storage storage.ExternalStorage, dataFiles []string, statsFiles []string, - minKey []byte, - maxKey []byte, + startKey []byte, + endKey []byte, splitKeys [][]byte, regionSplitSize int64, keyAdapter common.KeyAdapter, duplicateDetection bool, duplicateDB *pebble.DB, dupDetectOpt common.DupDetectOpt, + workerConcurrency int, ts uint64, totalKVSize int64, totalKVCount int64, + checkHotspot bool, ) common.Engine { return &Engine{ storage: storage, dataFiles: dataFiles, statsFiles: statsFiles, - minKey: minKey, - maxKey: maxKey, + startKey: startKey, + endKey: endKey, splitKeys: splitKeys, regionSplitSize: regionSplitSize, bufPool: membuf.NewPool(), + checkHotspot: checkHotspot, keyAdapter: keyAdapter, duplicateDetection: duplicateDetection, duplicateDB: duplicateDB, dupDetectOpt: dupDetectOpt, + workerConcurrency: workerConcurrency, ts: ts, totalKVSize: totalKVSize, totalKVCount: totalKVCount, @@ -119,6 +141,17 @@ func split[T any](in []T, groupNum int) [][]T { return ret } +func (e *Engine) getAdjustedConcurrency() int { + if e.checkHotspot { + // estimate we will open at most 1000 files, so if e.dataFiles is small we can + // try to concurrently process ranges. + adjusted := int(MergeSortOverlapThreshold) / len(e.dataFiles) + return min(adjusted, 8) + } + adjusted := min(e.workerConcurrency, maxCloudStorageConnections/len(e.dataFiles)) + return max(adjusted, 1) +} + // LoadIngestData loads the data from the external storage to memory in [start, // end) range, so local backend can ingest it. 
The used byte slice of ingest data // are allocated from Engine.bufPool and must be released by @@ -128,12 +161,16 @@ func (e *Engine) LoadIngestData( regionRanges []common.Range, outCh chan<- common.DataAndRange, ) error { - // estimate we will open at most 1000 files, so if e.dataFiles is small we can - // try to concurrently process ranges. - concurrency := int(MergeSortOverlapThreshold) / len(e.dataFiles) - concurrency = min(concurrency, 8) + concurrency := e.getAdjustedConcurrency() rangeGroups := split(regionRanges, concurrency) + logutil.Logger(ctx).Info("load ingest data", + zap.Int("concurrency", concurrency), + zap.Int("ranges", len(regionRanges)), + zap.Int("range-groups", len(rangeGroups)), + zap.Int("data-files", len(e.dataFiles)), + zap.Bool("check-hotspot", e.checkHotspot), + ) eg, egCtx := errgroup.WithContext(ctx) for _, ranges := range rangeGroups { ranges := ranges @@ -148,17 +185,10 @@ func (e *Engine) LoadIngestData( return iter.Error() } for _, r := range ranges { - results, err := e.loadIngestData(egCtx, iter, r.Start, r.End) + err := e.loadIngestData(egCtx, iter, r.Start, r.End, outCh) if err != nil { return errors.Trace(err) } - for _, result := range results { - select { - case <-egCtx.Done(): - return egCtx.Err() - case outCh <- result: - } - } } return nil }) @@ -192,21 +222,29 @@ func (e *Engine) loadIngestData( ctx context.Context, iter *MergeKVIter, start, end []byte, -) ([]common.DataAndRange, error) { + outCh chan<- common.DataAndRange) error { if bytes.Equal(start, end) { - return nil, errors.Errorf("start key and end key must not be the same: %s", + return errors.Errorf("start key and end key must not be the same: %s", hex.EncodeToString(start)) } - startTs := time.Now() + readRateHist := metrics.GlobalSortReadFromCloudStorageRate.WithLabelValues("read_and_sort") + readDurHist := metrics.GlobalSortReadFromCloudStorageDuration.WithLabelValues("read_and_sort") + sendFn := func(dr common.DataAndRange) error { + select { + case <-ctx.Done(): + return ctx.Err() + case outCh <- dr: + } + return nil + } + + loadStartTs, batchStartTs := time.Now(), time.Now() keys := make([][]byte, 0, 1024) values := make([][]byte, 0, 1024) memBuf := e.bufPool.NewBuffer() cnt := 0 size := 0 - totalSize := 0 - largeRegion := e.regionSplitSize > 2*int64(config.SplitRegionSize) - ret := make([]common.DataAndRange, 0, 1) curStart := start // there should be a key that just exceeds the end key in last loadIngestData @@ -217,7 +255,6 @@ func (e *Engine) loadIngestData( values = append(values, memBuf.AddBytes(v)) cnt++ size += len(k) + len(v) - totalSize += len(k) + len(v) } for iter.Next() { @@ -228,38 +265,49 @@ func (e *Engine) loadIngestData( if bytes.Compare(k, end) >= 0 { break } - if largeRegion && size > LargeRegionSplitDataThreshold { + // as we keep KV data in memory, to avoid OOM, we only keep at most 1 + // DataAndRange for each loadIngestData and regionJobWorker routine(channel + // is unbuffered). 
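The comment above pins down the backpressure design: because the output channel is unbuffered, each producer goroutine holds at most one DataAndRange in memory while the consumer ingests. A minimal, self-contained sketch of that context-aware send pattern (all names here are hypothetical stand-ins, not the real types):

```go
package main

import (
	"context"
	"fmt"
)

type batch struct{ keys [][]byte }

// produce sends batches with a select so a cancelled context can't
// deadlock a producer blocked on an unbuffered channel.
func produce(ctx context.Context, out chan<- batch) error {
	send := func(b batch) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case out <- b:
			return nil
		}
	}
	for i := 0; i < 3; i++ {
		if err := send(batch{keys: [][]byte{[]byte(fmt.Sprintf("key%d", i))}}); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	ch := make(chan batch) // unbuffered: at most one batch in flight per producer
	go func() {
		defer close(ch)
		_ = produce(context.Background(), ch)
	}()
	for b := range ch {
		fmt.Println("ingest", len(b.keys), "keys")
	}
}
```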
+ if size > LargeRegionSplitDataThreshold { + readRateHist.Observe(float64(size) / 1024.0 / 1024.0 / time.Since(batchStartTs).Seconds()) + readDurHist.Observe(time.Since(batchStartTs).Seconds()) curKey := slices.Clone(k) - ret = append(ret, common.DataAndRange{ + if err := sendFn(common.DataAndRange{ Data: e.buildIngestData(keys, values, memBuf), Range: common.Range{Start: curStart, End: curKey}, - }) + }); err != nil { + return errors.Trace(err) + } keys = make([][]byte, 0, 1024) values = make([][]byte, 0, 1024) size = 0 curStart = curKey + batchStartTs = time.Now() + memBuf = e.bufPool.NewBuffer() } keys = append(keys, memBuf.AddBytes(k)) values = append(values, memBuf.AddBytes(v)) cnt++ size += len(k) + len(v) - totalSize += len(k) + len(v) } if iter.Error() != nil { - return nil, errors.Trace(iter.Error()) + return errors.Trace(iter.Error()) + } + if len(keys) > 0 { + readRateHist.Observe(float64(size) / 1024.0 / 1024.0 / time.Since(batchStartTs).Seconds()) + readDurHist.Observe(time.Since(batchStartTs).Seconds()) + if err := sendFn(common.DataAndRange{ + Data: e.buildIngestData(keys, values, memBuf), + Range: common.Range{Start: curStart, End: end}, + }); err != nil { + return errors.Trace(err) + } } - - metrics.GlobalSortReadFromCloudStorageRate.WithLabelValues("read_and_sort").Observe(float64(totalSize) / 1024.0 / 1024.0 / time.Since(startTs).Seconds()) - metrics.GlobalSortReadFromCloudStorageDuration.WithLabelValues("read_and_sort").Observe(time.Since(startTs).Seconds()) logutil.Logger(ctx).Info("load data from external storage", - zap.Duration("cost time", time.Since(startTs)), + zap.Duration("cost time", time.Since(loadStartTs)), zap.Int("iterated count", cnt)) - ret = append(ret, common.DataAndRange{ - Data: e.buildIngestData(keys, values, memBuf), - Range: common.Range{Start: curStart, End: end}, - }) - return ret, nil + return nil } func (e *Engine) createMergeIter(ctx context.Context, start kv.Key) (*MergeKVIter, error) { @@ -271,18 +319,18 @@ func (e *Engine) createMergeIter(ctx context.Context, start kv.Key) (*MergeKVIte logger.Info("no stats files", zap.String("startKey", hex.EncodeToString(start))) } else { - offs, err := seekPropsOffsets(ctx, start, e.statsFiles, e.storage) + offs, err := seekPropsOffsets(ctx, start, e.statsFiles, e.storage, e.checkHotspot) if err != nil { return nil, errors.Trace(err) } offsets = offs - logger.Info("seek props offsets", + logger.Debug("seek props offsets", zap.Uint64s("offsets", offsets), zap.String("startKey", hex.EncodeToString(start)), zap.Strings("dataFiles", e.dataFiles), zap.Strings("statsFiles", e.statsFiles)) } - iter, err := NewMergeKVIter(ctx, e.dataFiles, offsets, e.storage, 64*1024) + iter, err := NewMergeKVIter(ctx, e.dataFiles, offsets, e.storage, 64*1024, e.checkHotspot) if err != nil { return nil, errors.Trace(err) } @@ -305,8 +353,8 @@ func (e *Engine) ID() string { } // GetKeyRange implements common.Engine. -func (e *Engine) GetKeyRange() (firstKey []byte, lastKey []byte, err error) { - return e.minKey, e.maxKey, nil +func (e *Engine) GetKeyRange() (startKey []byte, endKey []byte, err error) { + return e.startKey, e.endKey, nil } // SplitRanges split the ranges by split keys provided by external engine. 
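The test added in engine_test.go below pins the expected values of getAdjustedConcurrency; the arithmetic is easier to follow in isolation. A standalone sketch, assuming MergeSortOverlapThreshold is 1000 (an assumption chosen to be consistent with the test expectations, not stated by this diff):

```go
package main

import "fmt"

const (
	mergeSortOverlapThreshold  = 1000 // assumed value, see lead-in
	maxCloudStorageConnections = 8000 // from engine.go above
)

func adjustedConcurrency(checkHotspot bool, workerConcurrency, dataFiles int) int {
	if checkHotspot {
		// add-index path: fewer load routines, hotspot files get concurrent readers
		return min(mergeSortOverlapThreshold/dataFiles, 8)
	}
	// import-into path: one reader per file, capped by the connection budget
	return max(min(workerConcurrency, maxCloudStorageConnections/dataFiles), 1)
}

func main() {
	fmt.Println(adjustedConcurrency(true, 32, 100))    // 8
	fmt.Println(adjustedConcurrency(true, 32, 1000))   // 1
	fmt.Println(adjustedConcurrency(false, 32, 100))   // 32
	fmt.Println(adjustedConcurrency(false, 32, 1000))  // 8
	fmt.Println(adjustedConcurrency(false, 32, 10000)) // 1
}
```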
diff --git a/br/pkg/lightning/backend/external/engine_test.go b/br/pkg/lightning/backend/external/engine_test.go index fd673925f50a2..7d2a6eca318ca 100644 --- a/br/pkg/lightning/backend/external/engine_test.go +++ b/br/pkg/lightning/backend/external/engine_test.go @@ -17,6 +17,7 @@ package external import ( "bytes" "context" + "fmt" "path" "slices" "strconv" @@ -321,3 +322,29 @@ func TestSplit(t *testing.T) { require.Equal(t, c.expected, got) } } + +func TestGetAdjustedConcurrency(t *testing.T) { + genFiles := func(n int) []string { + files := make([]string, 0, n) + for i := 0; i < n; i++ { + files = append(files, fmt.Sprintf("file%d", i)) + } + return files + } + e := &Engine{ + checkHotspot: true, + workerConcurrency: 32, + dataFiles: genFiles(100), + } + require.Equal(t, 8, e.getAdjustedConcurrency()) + e.dataFiles = genFiles(1000) + require.Equal(t, 1, e.getAdjustedConcurrency()) + + e.checkHotspot = false + e.dataFiles = genFiles(100) + require.Equal(t, 32, e.getAdjustedConcurrency()) + e.dataFiles = genFiles(1000) + require.Equal(t, 8, e.getAdjustedConcurrency()) + e.dataFiles = genFiles(10000) + require.Equal(t, 1, e.getAdjustedConcurrency()) +} diff --git a/br/pkg/lightning/backend/external/file.go b/br/pkg/lightning/backend/external/file.go index 78b7a637ba98d..fd66938a03a0a 100644 --- a/br/pkg/lightning/backend/external/file.go +++ b/br/pkg/lightning/backend/external/file.go @@ -21,6 +21,9 @@ import ( "github.com/pingcap/tidb/br/pkg/storage" ) +// we use uint64 to store the length of key and value. +const lengthBytes = 8 + // KeyValueStore stores key-value pairs and maintains the range properties. type KeyValueStore struct { dataWriter storage.ExternalFileWriter @@ -46,51 +49,26 @@ func NewKeyValueStore( return kvStore, nil } -// AddKeyValue saves a key-value pair to the KeyValueStore. If the accumulated +// addEncodedData saves encoded key-value pairs to the KeyValueStore. +// data layout: keyLen + key + valueLen + value. If the accumulated // size or key count exceeds the given distance, a new range property will be // appended to the rangePropertiesCollector with current status. // `key` must be in strictly ascending order for invocations of a KeyValueStore. 
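The layout named in the comment above (keyLen + key + valueLen + value, with 8-byte big-endian lengths) is simple enough to round-trip in a few lines. encode below mirrors the getEncodedData test helper added later in this diff; decode is a hypothetical counterpart for illustration:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const lengthBytes = 8 // uint64 length prefix, as in file.go

func encode(key, value []byte) []byte {
	buf := make([]byte, lengthBytes*2+len(key)+len(value))
	binary.BigEndian.PutUint64(buf, uint64(len(key)))
	copy(buf[lengthBytes:], key)
	binary.BigEndian.PutUint64(buf[lengthBytes+len(key):], uint64(len(value)))
	copy(buf[lengthBytes*2+len(key):], value)
	return buf
}

func decode(buf []byte) (key, value []byte) {
	keyLen := binary.BigEndian.Uint64(buf)
	key = buf[lengthBytes : lengthBytes+keyLen]
	valLen := binary.BigEndian.Uint64(buf[lengthBytes+keyLen:])
	value = buf[lengthBytes*2+keyLen : lengthBytes*2+keyLen+valLen]
	return key, value
}

func main() {
	k, v := decode(encode([]byte("key1"), []byte("value1")))
	fmt.Printf("%s=%s\n", k, v) // key1=value1
}
```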
-func (s *KeyValueStore) AddKeyValue(key, value []byte) error { - var ( - b [8]byte - kvLen = 0 - ) - - // data layout: keyLen + key + valueLen + value - n, err := s.dataWriter.Write( - s.ctx, - binary.BigEndian.AppendUint64(b[:0], uint64(len(key))), - ) - if err != nil { - return err - } - kvLen += n - n, err = s.dataWriter.Write(s.ctx, key) - if err != nil { - return err - } - kvLen += n - n, err = s.dataWriter.Write( - s.ctx, - binary.BigEndian.AppendUint64(b[:0], uint64(len(value))), - ) - if err != nil { - return err - } - kvLen += n - n, err = s.dataWriter.Write(s.ctx, value) +func (s *KeyValueStore) addEncodedData(val []byte) error { + _, err := s.dataWriter.Write(s.ctx, val) if err != nil { return err } - kvLen += n + keyLen := binary.BigEndian.Uint64(val) + key := val[lengthBytes : lengthBytes+keyLen] if len(s.rc.currProp.firstKey) == 0 { s.rc.currProp.firstKey = key } s.rc.currProp.lastKey = key - s.offset += uint64(kvLen) - s.rc.currProp.size += uint64(len(key) + len(value)) + s.offset += uint64(len(val)) + s.rc.currProp.size += uint64(len(val) - lengthBytes*2) s.rc.currProp.keys++ if s.rc.currProp.size >= s.rc.propSizeDist || diff --git a/br/pkg/lightning/backend/external/file_test.go b/br/pkg/lightning/backend/external/file_test.go index 531b8a6e062dd..65ab999b17476 100644 --- a/br/pkg/lightning/backend/external/file_test.go +++ b/br/pkg/lightning/backend/external/file_test.go @@ -44,14 +44,14 @@ func TestAddKeyValueMaintainRangeProperty(t *testing.T) { require.Len(t, encoded, 0) k1, v1 := []byte("key1"), []byte("value1") - err = kvStore.AddKeyValue(k1, v1) + err = kvStore.addEncodedData(getEncodedData(k1, v1)) require.NoError(t, err) // when not accumulated enough data, no range property will be added. require.Equal(t, &initRC, rc) // propKeysDist = 2, so after adding 2 keys, a new range property will be added. k2, v2 := []byte("key2"), []byte("value2") - err = kvStore.AddKeyValue(k2, v2) + err = kvStore.addEncodedData(getEncodedData(k2, v2)) require.NoError(t, err) require.Len(t, rc.props, 1) expected := &rangeProperty{ @@ -67,7 +67,7 @@ func TestAddKeyValueMaintainRangeProperty(t *testing.T) { // when not accumulated enough data, no range property will be added. 
k3, v3 := []byte("key3"), []byte("value3") - err = kvStore.AddKeyValue(k3, v3) + err = kvStore.addEncodedData(getEncodedData(k3, v3)) require.NoError(t, err) require.Len(t, rc.props, 1) @@ -93,7 +93,7 @@ func TestAddKeyValueMaintainRangeProperty(t *testing.T) { rc.reset() kvStore, err = NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) - err = kvStore.AddKeyValue(k1, v1) + err = kvStore.addEncodedData(getEncodedData(k1, v1)) require.NoError(t, err) require.Len(t, rc.props, 1) expected = &rangeProperty{ @@ -105,7 +105,7 @@ func TestAddKeyValueMaintainRangeProperty(t *testing.T) { } require.Equal(t, expected, rc.props[0]) - err = kvStore.AddKeyValue(k2, v2) + err = kvStore.addEncodedData(getEncodedData(k2, v2)) require.NoError(t, err) require.Len(t, rc.props, 2) expected = &rangeProperty{ @@ -149,7 +149,7 @@ func TestKVReadWrite(t *testing.T) { randLen = rand.Intn(10) + 1 values[i] = make([]byte, randLen) rand.Read(values[i]) - err = kvStore.AddKeyValue(keys[i], values[i]) + err = kvStore.addEncodedData(getEncodedData(keys[i], values[i])) require.NoError(t, err) } err = writer.Close(ctx) diff --git a/br/pkg/lightning/backend/external/iter.go b/br/pkg/lightning/backend/external/iter.go index 718a8f1143768..0cbda86c6a02a 100644 --- a/br/pkg/lightning/backend/external/iter.go +++ b/br/pkg/lightning/backend/external/iter.go @@ -81,15 +81,20 @@ func (h *mergeHeap[T]) Pop() interface{} { } type mergeIter[T heapElem, R sortedReader[T]] struct { - h mergeHeap[T] - readers []*R - curr T - lastReaderIdx int - err error - hotspotMap map[int]int - checkHotspotCnt int - lastHotspotIdx int - elemFromHotspot *T + h mergeHeap[T] + readers []*R + curr T + lastReaderIdx int + err error + + // determines whether to check reader hotspot, if hotspot is detected, we will + // try read this file concurrently. + checkHotspot bool + hotspotMap map[int]int + checkHotspotCnt int + checkHotspotPeriod int + lastHotspotIdx int + elemFromHotspot *T logger *zap.Logger } @@ -102,7 +107,7 @@ type readerOpenerFn[T heapElem, R sortedReader[T]] func() (*R, error) func newMergeIter[ T heapElem, R sortedReader[T], -](ctx context.Context, readerOpeners []readerOpenerFn[T, R]) (*mergeIter[T, R], error) { +](ctx context.Context, readerOpeners []readerOpenerFn[T, R], checkHotspot bool) (*mergeIter[T, R], error) { logger := logutil.Logger(ctx) readers := make([]*R, len(readerOpeners)) closeReaders := func() { @@ -148,9 +153,12 @@ func newMergeIter[ h: make(mergeHeap[T], 0, len(readers)), readers: readers, lastReaderIdx: -1, + checkHotspot: checkHotspot, hotspotMap: make(map[int]int), logger: logger, } + sampleKeySize := 0 + sampleKeyCnt := 0 for j := range i.readers { if i.readers[j] == nil { continue @@ -179,6 +187,15 @@ func newMergeIter[ elem: e, readerIdx: j, }) + sampleKeySize += len(e.sortKey()) + sampleKeyCnt++ + } + // We check the hotspot when the elements size is almost the same as the concurrent reader buffer size. + // So that we don't drop too many bytes if the hotspot shifts to other files. + if sampleKeySize == 0 || sampleKeySize/sampleKeyCnt == 0 { + i.checkHotspotPeriod = 10000 + } else { + i.checkHotspotPeriod = max(1000, ConcurrentReaderBufferSizePerConc*ConcurrentReaderConcurrency/(sampleKeySize/sampleKeyCnt)) } heap.Init(&i.h) return i, nil @@ -210,50 +227,50 @@ func (i *mergeIter[T, R]) currElem() T { return i.curr } -var checkHotspotPeriod = 1000 - // next forwards the iterator to the next element. It returns false if there is // no available element. 
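The hotspot-check interval is no longer the fixed checkHotspotPeriod package variable; it now adapts so that roughly one concurrent-reader buffer's worth of keys is consumed between checks, bounding how many buffered bytes are dropped if the hotspot shifts to another file. A sketch of that computation; the buffer size, reader concurrency, and key sizes used in main are illustrative assumptions only:

```go
package main

import "fmt"

// checkPeriod mirrors the adaptive formula above:
// max(1000, bufPerConc*conc/avgKeySize), with a 10000 fallback when the
// sampled keys are empty.
func checkPeriod(sampleKeySize, sampleKeyCnt, bufPerConc, conc int) int {
	if sampleKeySize == 0 || sampleKeySize/sampleKeyCnt == 0 {
		return 10000
	}
	return max(1000, bufPerConc*conc/(sampleKeySize/sampleKeyCnt))
}

func main() {
	// assume 4 MiB per-concurrency buffers, 8 concurrent readers, 64-byte keys
	fmt.Println(checkPeriod(64*100, 100, 4<<20, 8)) // 524288 key reads per check
}
```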
func (i *mergeIter[T, R]) next() bool { var zeroT T i.curr = zeroT if i.lastReaderIdx >= 0 { - i.hotspotMap[i.lastReaderIdx] = i.hotspotMap[i.lastReaderIdx] + 1 - i.checkHotspotCnt++ - - // check hotspot every checkPeriod times - if i.checkHotspotCnt == checkHotspotPeriod { - oldHotspotIdx := i.lastHotspotIdx - i.lastHotspotIdx = -1 - for idx, cnt := range i.hotspotMap { - // currently only one reader will become hotspot - if cnt > (checkHotspotPeriod / 2) { - i.lastHotspotIdx = idx - break + if i.checkHotspot { + i.hotspotMap[i.lastReaderIdx] = i.hotspotMap[i.lastReaderIdx] + 1 + i.checkHotspotCnt++ + + // check hotspot every checkPeriod times + if i.checkHotspotCnt == i.checkHotspotPeriod { + oldHotspotIdx := i.lastHotspotIdx + i.lastHotspotIdx = -1 + for idx, cnt := range i.hotspotMap { + // currently only one reader will become hotspot + if cnt > (i.checkHotspotPeriod / 2) { + i.lastHotspotIdx = idx + break + } } - } - // we are going to switch concurrent reader and free its memory. Clone - // the fields to avoid use-after-free. - if oldHotspotIdx != i.lastHotspotIdx { - if i.elemFromHotspot != nil { - (*i.elemFromHotspot).cloneInnerFields() - i.elemFromHotspot = nil + // we are going to switch concurrent reader and free its memory. Clone + // the fields to avoid use-after-free. + if oldHotspotIdx != i.lastHotspotIdx { + if i.elemFromHotspot != nil { + (*i.elemFromHotspot).cloneInnerFields() + i.elemFromHotspot = nil + } } - } - for idx, rp := range i.readers { - if rp == nil { - continue - } - isHotspot := i.lastHotspotIdx == idx - err := (*rp).switchConcurrentMode(isHotspot) - if err != nil { - i.err = err - return false + for idx, rp := range i.readers { + if rp == nil { + continue + } + isHotspot := i.lastHotspotIdx == idx + err := (*rp).switchConcurrentMode(isHotspot) + if err != nil { + i.err = err + return false + } } + i.checkHotspotCnt = 0 + i.hotspotMap = make(map[int]int) } - i.checkHotspotCnt = 0 - i.hotspotMap = make(map[int]int) } rd := *i.readers[i.lastReaderIdx] @@ -261,7 +278,7 @@ func (i *mergeIter[T, R]) next() bool { switch err { case nil: - if i.lastReaderIdx == i.lastHotspotIdx { + if i.checkHotspot && i.lastReaderIdx == i.lastHotspotIdx { i.elemFromHotspot = &e } heap.Push(&i.h, mergeHeapElem[T]{elem: e, readerIdx: i.lastReaderIdx}) @@ -346,6 +363,7 @@ func NewMergeKVIter( pathsStartOffset []uint64, exStorage storage.ExternalStorage, readBufferSize int, + checkHotspot bool, ) (*MergeKVIter, error) { readerOpeners := make([]readerOpenerFn[*kvPair, kvReaderProxy], 0, len(paths)) largeBufSize := ConcurrentReaderBufferSizePerConc * ConcurrentReaderConcurrency @@ -373,7 +391,7 @@ func NewMergeKVIter( }) } - it, err := newMergeIter[*kvPair, kvReaderProxy](ctx, readerOpeners) + it, err := newMergeIter[*kvPair, kvReaderProxy](ctx, readerOpeners, checkHotspot) return &MergeKVIter{iter: it, memPool: memPool}, err } @@ -445,6 +463,7 @@ func NewMergePropIter( ctx context.Context, paths []string, exStorage storage.ExternalStorage, + checkHotSpot bool, ) (*MergePropIter, error) { readerOpeners := make([]readerOpenerFn[*rangeProperty, statReaderProxy], 0, len(paths)) for i := range paths { @@ -458,7 +477,7 @@ func NewMergePropIter( }) } - it, err := newMergeIter[*rangeProperty, statReaderProxy](ctx, readerOpeners) + it, err := newMergeIter[*rangeProperty, statReaderProxy](ctx, readerOpeners, checkHotSpot) return &MergePropIter{iter: it}, err } diff --git a/br/pkg/lightning/backend/external/iter_test.go b/br/pkg/lightning/backend/external/iter_test.go index 
c8fcbc06358a5..e9b27f3793b6e 100644 --- a/br/pkg/lightning/backend/external/iter_test.go +++ b/br/pkg/lightning/backend/external/iter_test.go @@ -24,7 +24,9 @@ import ( "time" "github.com/pingcap/tidb/br/pkg/lightning/common" + "github.com/pingcap/tidb/br/pkg/membuf" "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/pkg/util/logutil" "github.com/stretchr/testify/require" "go.uber.org/atomic" "golang.org/x/exp/rand" @@ -58,6 +60,15 @@ func (r *trackOpenFileReader) Close() error { return nil } +func getEncodedData(key, value []byte) []byte { + buf := make([]byte, 8*2+len(key)+len(value)) + binary.BigEndian.PutUint64(buf, uint64(len(key))) + copy(buf[8:], key) + binary.BigEndian.PutUint64(buf[8+len(key):], uint64(len(value))) + copy(buf[8*2+len(key):], value) + return buf +} + func TestMergeKVIter(t *testing.T) { ctx := context.Background() memStore := storage.NewMemStorage() @@ -78,7 +89,7 @@ func TestMergeKVIter(t *testing.T) { kvStore, err := NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) for _, kv := range data[i] { - err = kvStore.AddKeyValue([]byte(kv[0]), []byte(kv[1])) + err = kvStore.addEncodedData(getEncodedData([]byte(kv[0]), []byte(kv[1]))) require.NoError(t, err) } err = writer.Close(ctx) @@ -86,7 +97,7 @@ func TestMergeKVIter(t *testing.T) { } trackStore := &trackOpenMemStorage{MemStorage: memStore} - iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5) + iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5, true) require.NoError(t, err) // close one empty file immediately in NewMergeKVIter require.EqualValues(t, 2, trackStore.opened.Load()) @@ -130,7 +141,7 @@ func TestOneUpstream(t *testing.T) { kvStore, err := NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) for _, kv := range data[i] { - err = kvStore.AddKeyValue([]byte(kv[0]), []byte(kv[1])) + err = kvStore.addEncodedData(getEncodedData([]byte(kv[0]), []byte(kv[1]))) require.NoError(t, err) } err = writer.Close(ctx) @@ -138,7 +149,7 @@ func TestOneUpstream(t *testing.T) { } trackStore := &trackOpenMemStorage{MemStorage: memStore} - iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5) + iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5, true) require.NoError(t, err) require.EqualValues(t, 1, trackStore.opened.Load()) @@ -175,14 +186,14 @@ func TestAllEmpty(t *testing.T) { } trackStore := &trackOpenMemStorage{MemStorage: memStore} - iter, err := NewMergeKVIter(ctx, []string{filenames[0]}, []uint64{0}, trackStore, 5) + iter, err := NewMergeKVIter(ctx, []string{filenames[0]}, []uint64{0}, trackStore, 5, false) require.NoError(t, err) require.EqualValues(t, 0, trackStore.opened.Load()) require.False(t, iter.Next()) require.NoError(t, iter.Error()) require.NoError(t, iter.Close()) - iter, err = NewMergeKVIter(ctx, filenames, []uint64{0, 0}, trackStore, 5) + iter, err = NewMergeKVIter(ctx, filenames, []uint64{0, 0}, trackStore, 5, false) require.NoError(t, err) require.EqualValues(t, 0, trackStore.opened.Load()) require.False(t, iter.Next()) @@ -208,7 +219,7 @@ func TestCorruptContent(t *testing.T) { kvStore, err := NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) for _, kv := range data[i] { - err = kvStore.AddKeyValue([]byte(kv[0]), []byte(kv[1])) + err = kvStore.addEncodedData(getEncodedData([]byte(kv[0]), []byte(kv[1]))) require.NoError(t, err) } if i == 0 { @@ -220,7 +231,7 @@ func TestCorruptContent(t *testing.T) { } trackStore := &trackOpenMemStorage{MemStorage: memStore} - 
iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5) + iter, err := NewMergeKVIter(ctx, filenames, []uint64{0, 0, 0}, trackStore, 5, true) require.NoError(t, err) require.EqualValues(t, 2, trackStore.opened.Load()) @@ -308,7 +319,7 @@ func testMergeIterSwitchMode(t *testing.T, f func([]byte, int) []byte) { offsets := make([]uint64, len(dataNames)) - iter, err := NewMergeKVIter(context.Background(), dataNames, offsets, st, 2048) + iter, err := NewMergeKVIter(context.Background(), dataNames, offsets, st, 2048, true) require.NoError(t, err) for iter.Next() { @@ -317,13 +328,44 @@ func testMergeIterSwitchMode(t *testing.T, f func([]byte, int) []byte) { require.NoError(t, err) } -func TestHotspot(t *testing.T) { - backup := checkHotspotPeriod - checkHotspotPeriod = 2 - t.Cleanup(func() { - checkHotspotPeriod = backup - }) +type eofReader struct { + storage.ExternalFileReader +} + +func (r eofReader) Seek(_ int64, _ int) (int64, error) { + return 0, nil +} + +func (r eofReader) Read(_ []byte) (int, error) { + return 0, io.EOF +} +func TestReadAfterCloseConnReader(t *testing.T) { + ctx := context.Background() + + reader := &byteReader{ + ctx: ctx, + storageReader: eofReader{}, + smallBuf: []byte{0, 255, 255, 255, 255, 255, 255, 255}, + curBufOffset: 8, + logger: logutil.Logger(ctx), + } + reader.curBuf = reader.smallBuf + pool := membuf.NewPool() + reader.concurrentReader.largeBufferPool = pool.NewBuffer() + reader.concurrentReader.store = storage.NewMemStorage() + + // set current reader to concurrent reader, and then close it + reader.concurrentReader.now = true + err := reader.switchConcurrentMode(false) + require.NoError(t, err) + + wrapKVReader := &kvReader{reader} + _, _, err = wrapKVReader.nextKV() + require.ErrorIs(t, err, io.EOF) +} + +func TestHotspot(t *testing.T) { ctx := context.Background() store := storage.NewMemStorage() @@ -345,7 +387,7 @@ func TestHotspot(t *testing.T) { kvStore, err := NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) for _, k := range keys[i] { - err = kvStore.AddKeyValue([]byte(k), value) + err = kvStore.addEncodedData(getEncodedData([]byte(k), value)) require.NoError(t, err) } err = writer.Close(ctx) @@ -353,8 +395,9 @@ func TestHotspot(t *testing.T) { } // readerBufSize = 8+5+8+5, every KV will cause reload - iter, err := NewMergeKVIter(ctx, filenames, make([]uint64, len(filenames)), store, 26) + iter, err := NewMergeKVIter(ctx, filenames, make([]uint64, len(filenames)), store, 26, true) require.NoError(t, err) + iter.iter.checkHotspotPeriod = 2 // after read key00 and key01 from reader_0, it becomes hotspot require.True(t, iter.Next()) require.Equal(t, "key00", string(iter.Key())) @@ -404,15 +447,10 @@ func TestHotspot(t *testing.T) { } func TestMemoryUsageWhenHotspotChange(t *testing.T) { - backup := checkHotspotPeriod - checkHotspotPeriod = 10 - t.Cleanup(func() { - checkHotspotPeriod = backup - }) - backup2 := ConcurrentReaderBufferSizePerConc + backup := ConcurrentReaderBufferSizePerConc ConcurrentReaderBufferSizePerConc = 100 * 1024 * 1024 // 100MB, make memory leak more obvious t.Cleanup(func() { - ConcurrentReaderBufferSizePerConc = backup2 + ConcurrentReaderBufferSizePerConc = backup }) getMemoryInUse := func() uint64 { @@ -443,16 +481,16 @@ func TestMemoryUsageWhenHotspotChange(t *testing.T) { rc.reset() kvStore, err := NewKeyValueStore(ctx, writer, rc) require.NoError(t, err) - for j := 0; j < checkHotspotPeriod; j++ { + for j := 0; j < 1000; j++ { key := fmt.Sprintf("key%06d", cur) val := fmt.Sprintf("value%06d", 
cur) - err = kvStore.AddKeyValue([]byte(key), []byte(val)) + err = kvStore.addEncodedData(getEncodedData([]byte(key), []byte(val))) require.NoError(t, err) cur++ } for j := 0; j <= 12; j++ { key := fmt.Sprintf("key999%06d", cur+j) - err = kvStore.AddKeyValue([]byte(key), largeChunk) + err = kvStore.addEncodedData(getEncodedData([]byte(key), largeChunk)) require.NoError(t, err) } err = writer.Close(ctx) @@ -461,8 +499,9 @@ func TestMemoryUsageWhenHotspotChange(t *testing.T) { beforeMem := getMemoryInUse() - iter, err := NewMergeKVIter(ctx, filenames, make([]uint64, len(filenames)), store, 1024) + iter, err := NewMergeKVIter(ctx, filenames, make([]uint64, len(filenames)), store, 1024, true) require.NoError(t, err) + iter.iter.checkHotspotPeriod = 10 i := 0 for cur > 0 { cur-- diff --git a/br/pkg/lightning/backend/external/kv_buf.go b/br/pkg/lightning/backend/external/kv_buf.go new file mode 100644 index 0000000000000..5079c3f475b41 --- /dev/null +++ b/br/pkg/lightning/backend/external/kv_buf.go @@ -0,0 +1,75 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package external + +import "github.com/docker/go-units" + +// DefaultBlockSize is the default block size for preAllocKVBuf. +const DefaultBlockSize = 16 * units.MiB + +// preAllocKVBuf pre allocates a large buffer of limit memLimit to reduce memory +// allocation, all space in this buffer will be reused when reset. +type preAllocKVBuf struct { + blocks [][]byte + blockSize int + curBlock []byte + curBlockIdx int + curIdx int +} + +func newPreAllocKVBuf(memLimit uint64, blockSize int) *preAllocKVBuf { + blockCount := (memLimit + uint64(blockSize) - 1) / uint64(blockSize) + b := &preAllocKVBuf{ + blocks: make([][]byte, 0, blockCount), + blockSize: blockSize, + } + for i := 0; i < int(blockCount); i++ { + b.blocks = append(b.blocks, make([]byte, blockSize)) + } + b.reset() + return b +} + +func (b *preAllocKVBuf) Alloc(s int) (blockIdx int32, res []byte, offset int32, allocated bool) { + if s > b.blockSize { + return + } + if b.blockSize-b.curIdx < s { + if b.curBlockIdx+1 >= len(b.blocks) { + return + } + b.curBlockIdx++ + b.curBlock = b.blocks[b.curBlockIdx] + b.curIdx = 0 + } + blockIdx = int32(b.curBlockIdx) + res = b.curBlock[b.curIdx : b.curIdx+s] + offset = int32(b.curIdx) + allocated = true + + b.curIdx += s + return +} + +func (b *preAllocKVBuf) reset() { + b.curBlockIdx = 0 + b.curBlock = b.blocks[0] + b.curIdx = 0 +} + +func (b *preAllocKVBuf) destroy() { + b.blocks = nil + b.curBlock = nil +} diff --git a/br/pkg/lightning/backend/external/kv_buf_test.go b/br/pkg/lightning/backend/external/kv_buf_test.go new file mode 100644 index 0000000000000..fddb3230b417d --- /dev/null +++ b/br/pkg/lightning/backend/external/kv_buf_test.go @@ -0,0 +1,77 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package external + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewPreAllocKVBuf(t *testing.T) { + cases := []struct { + memLimit uint64 + expectBlocks int + }{ + {1, 1}, + {15, 1}, + {16, 1}, + {17, 2}, + {31, 2}, + {32, 2}, + } + for _, c := range cases { + buf := newPreAllocKVBuf(c.memLimit, 16) + require.Equal(t, c.expectBlocks, len(buf.blocks)) + require.Equal(t, 16, buf.blockSize) + require.Equal(t, buf.blocks[0], buf.curBlock) + require.Equal(t, 0, buf.curBlockIdx) + require.Equal(t, 0, buf.curIdx) + } + + buf := newPreAllocKVBuf(16, 8) + // alloc larger than block size. + _, _, _, allocated := buf.Alloc(9) + require.False(t, allocated) + blockIdx, res, offset, allocated := buf.Alloc(8) + require.Equal(t, int32(0), blockIdx) + require.Equal(t, int32(0), offset) + require.True(t, allocated) + copy(res, "12345678") + blockIdx, res, offset, allocated = buf.Alloc(4) + require.Equal(t, int32(1), blockIdx) + require.Equal(t, int32(0), offset) + require.True(t, allocated) + copy(res, "aaaa") + blockIdx, res, offset, allocated = buf.Alloc(4) + require.Equal(t, int32(1), blockIdx) + require.Equal(t, int32(4), offset) + require.True(t, allocated) + copy(res, "bbbb") + _, _, _, allocated = buf.Alloc(4) + require.False(t, allocated) + + require.Equal(t, "12345678", string(buf.blocks[0])) + require.Equal(t, "aaaabbbb", string(buf.blocks[1])) + + buf.reset() + require.Equal(t, buf.blocks[0], buf.curBlock) + require.Equal(t, 0, buf.curBlockIdx) + require.Equal(t, 0, buf.curIdx) + + buf.destroy() + require.Nil(t, buf.blocks) + require.Nil(t, buf.curBlock) +} diff --git a/br/pkg/lightning/backend/external/merge.go b/br/pkg/lightning/backend/external/merge.go index 2e74159aac5b1..1de2dcc34ab06 100644 --- a/br/pkg/lightning/backend/external/merge.go +++ b/br/pkg/lightning/backend/external/merge.go @@ -3,42 +3,99 @@ package external import ( "context" + "github.com/google/uuid" "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/memory" + "go.uber.org/zap" + "golang.org/x/sync/errgroup" ) // MergeOverlappingFiles reads from given files whose key range may overlap // and writes to new sorted, nonoverlapping files. 
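The rewritten MergeOverlappingFiles below fans the input files out across goroutines: each worker takes a contiguous batch of len(paths)/concurrency files, and the memory budget (half of total memory) is divided evenly among batches. The partitioning on its own, as a sketch (splitBatches is a hypothetical name):

```go
package main

import "fmt"

// splitBatches mirrors the dataFilesSlice construction below. Note that
// integer division can yield slightly more batches than `concurrency`.
func splitBatches(paths []string, concurrency int) [][]string {
	batch := 1
	if len(paths) > concurrency {
		batch = len(paths) / concurrency
	}
	var out [][]string
	for i := 0; i < len(paths); i += batch {
		end := min(i+batch, len(paths))
		out = append(out, paths[i:end])
	}
	return out
}

func main() {
	paths := []string{"f0", "f1", "f2", "f3", "f4", "f5", "f6"}
	for _, b := range splitBatches(paths, 3) {
		fmt.Println(b) // [f0 f1] [f2 f3] [f4 f5] [f6]
	}
}
```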
-func MergeOverlappingFiles( - ctx context.Context, +func MergeOverlappingFiles(ctx context.Context, paths []string, store storage.ExternalStorage, readBufferSize int, + newFilePrefix string, blockSize int, writeBatchCount uint64, propSizeDist uint64, propKeysDist uint64, + onClose OnCloseFunc, concurrency int, checkHotspot bool) error { + var dataFilesSlice [][]string + batchCount := 1 + if len(paths) > concurrency { + batchCount = len(paths) / concurrency + } + for i := 0; i < len(paths); i += batchCount { + end := i + batchCount + if end > len(paths) { + end = len(paths) + } + dataFilesSlice = append(dataFilesSlice, paths[i:end]) + } + + memTotal, err := memory.MemTotal() + if err != nil { + return err + } + memSize := (memTotal / 2) / uint64(len(dataFilesSlice)) + + var eg errgroup.Group + for _, files := range dataFilesSlice { + files := files + eg.Go(func() error { + return mergeOverlappingFilesImpl( + ctx, + files, + store, + readBufferSize, + newFilePrefix, + uuid.New().String(), + memSize, + blockSize, + writeBatchCount, + propSizeDist, + propKeysDist, + onClose, + checkHotspot, + ) + }) + } + return eg.Wait() +} + +func mergeOverlappingFilesImpl(ctx context.Context, paths []string, store storage.ExternalStorage, readBufferSize int, newFilePrefix string, writerID string, memSizeLimit uint64, + blockSize int, writeBatchCount uint64, propSizeDist uint64, propKeysDist uint64, onClose OnCloseFunc, + checkHotspot bool, ) error { zeroOffsets := make([]uint64, len(paths)) - iter, err := NewMergeKVIter(ctx, paths, zeroOffsets, store, readBufferSize) + iter, err := NewMergeKVIter(ctx, paths, zeroOffsets, store, readBufferSize, checkHotspot) if err != nil { return err } - defer iter.Close() + defer func() { + err := iter.Close() + if err != nil { + logutil.Logger(ctx).Warn("close iterator failed", zap.Error(err)) + } + }() writer := NewWriterBuilder(). SetMemorySizeLimit(memSizeLimit). + SetBlockSize(blockSize). + SetOnCloseFunc(onClose). SetWriterBatchCount(writeBatchCount). - SetPropKeysDistance(propKeysDist). SetPropSizeDistance(propSizeDist). - SetOnCloseFunc(onClose). + SetPropKeysDistance(propKeysDist). Build(store, newFilePrefix, writerID) // currently use same goroutine to do read and write. The main advantage is // there's no KV copy and iter can reuse the buffer. 
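Each batch above runs under an errgroup, with the pre-Go-1.22 `files := files` re-declaration so every goroutine captures its own slice rather than the shared loop variable. Reduced to a standalone sketch (the printed work stands in for mergeOverlappingFilesImpl):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	batches := [][]string{{"a", "b"}, {"c"}, {"d", "e"}}
	var eg errgroup.Group
	for _, files := range batches {
		files := files // per-iteration copy, so the closure doesn't observe loop-variable mutation
		eg.Go(func() error {
			fmt.Println("merging", files)
			return nil // a real worker returns the merge error; Wait surfaces the first one
		})
	}
	if err := eg.Wait(); err != nil {
		fmt.Println("merge failed:", err)
	}
}
```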
- for iter.Next() { err = writer.WriteRow(ctx, iter.Key(), iter.Value(), nil) if err != nil { diff --git a/br/pkg/lightning/backend/external/split.go b/br/pkg/lightning/backend/external/split.go index 40d713ed32259..2d24001e83621 100644 --- a/br/pkg/lightning/backend/external/split.go +++ b/br/pkg/lightning/backend/external/split.go @@ -95,8 +95,9 @@ func NewRangeSplitter( externalStorage storage.ExternalStorage, rangesGroupSize, rangesGroupKeys int64, maxRangeSize, maxRangeKeys int64, + checkHotSpot bool, ) (*RangeSplitter, error) { - propIter, err := NewMergePropIter(ctx, statFiles, externalStorage) + propIter, err := NewMergePropIter(ctx, statFiles, externalStorage, checkHotSpot) if err != nil { return nil, err } diff --git a/br/pkg/lightning/backend/external/split_test.go b/br/pkg/lightning/backend/external/split_test.go index 056b360833b18..a49f697b46116 100644 --- a/br/pkg/lightning/backend/external/split_test.go +++ b/br/pkg/lightning/backend/external/split_test.go @@ -55,7 +55,7 @@ func TestGeneralProperties(t *testing.T) { dataFiles, statFiles, err := MockExternalEngine(memStore, keys, values) require.NoError(t, err) splitter, err := NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 1, + ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 1, true, ) var lastEndKey []byte notExhausted: @@ -111,7 +111,7 @@ func TestOnlyOneGroup(t *testing.T) { require.NoError(t, err) splitter, err := NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 10, + ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 10, true, ) require.NoError(t, err) endKey, dataFiles, statFiles, splitKeys, err := splitter.SplitOneRangesGroup() @@ -123,7 +123,7 @@ func TestOnlyOneGroup(t *testing.T) { require.NoError(t, splitter.Close()) splitter, err = NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 1, + ctx, dataFiles, statFiles, memStore, 1000, 30, 1000, 1, true, ) require.NoError(t, err) endKey, dataFiles, statFiles, splitKeys, err = splitter.SplitOneRangesGroup() @@ -144,7 +144,7 @@ func TestSortedData(t *testing.T) { values := make([][]byte, kvNum) for i := range keys { keys[i] = []byte(fmt.Sprintf("key%03d", i)) - values[i] = []byte(fmt.Sprintf("value%03d", i)) + values[i] = []byte(fmt.Sprintf("val%03d", i)) } dataFiles, statFiles, err := MockExternalEngine(memStore, keys, values) @@ -156,7 +156,7 @@ func TestSortedData(t *testing.T) { groupFileNumUpperBound := int(math.Ceil(float64(rangesGroupKV-1)/avgKVPerFile)) + 1 splitter, err := NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, int64(rangesGroupKV), 1000, 10, + ctx, dataFiles, statFiles, memStore, 1000, int64(rangesGroupKV), 1000, 10, true, ) require.NoError(t, err) @@ -177,7 +177,8 @@ func TestRangeSplitterStrictCase(t *testing.T) { subDir := "/mock-test" writer1 := NewWriterBuilder(). - SetMemorySizeLimit(15). // slightly larger than len("key01") + len("value01") + SetMemorySizeLimit(2*(lengthBytes*2+10)). + SetBlockSize(2*(lengthBytes*2+10)). SetPropSizeDistance(1). SetPropKeysDistance(1). 
Build(memStore, subDir, "1") @@ -185,7 +186,7 @@ func TestRangeSplitterStrictCase(t *testing.T) { []byte("key01"), []byte("key11"), []byte("key21"), } values1 := [][]byte{ - []byte("value01"), []byte("value11"), []byte("value21"), + []byte("val01"), []byte("val11"), []byte("val21"), } dataFiles1, statFiles1, err := MockExternalEngineWithWriter(memStore, writer1, subDir, keys1, values1) require.NoError(t, err) @@ -193,7 +194,8 @@ func TestRangeSplitterStrictCase(t *testing.T) { require.Len(t, statFiles1, 2) writer2 := NewWriterBuilder(). - SetMemorySizeLimit(15). + SetMemorySizeLimit(2*(lengthBytes*2+10)). + SetBlockSize(2*(lengthBytes*2+10)). SetPropSizeDistance(1). SetPropKeysDistance(1). Build(memStore, subDir, "2") @@ -201,7 +203,7 @@ func TestRangeSplitterStrictCase(t *testing.T) { []byte("key02"), []byte("key12"), []byte("key22"), } values2 := [][]byte{ - []byte("value02"), []byte("value12"), []byte("value22"), + []byte("val02"), []byte("val12"), []byte("val22"), } dataFiles12, statFiles12, err := MockExternalEngineWithWriter(memStore, writer2, subDir, keys2, values2) require.NoError(t, err) @@ -209,7 +211,8 @@ func TestRangeSplitterStrictCase(t *testing.T) { require.Len(t, statFiles12, 4) writer3 := NewWriterBuilder(). - SetMemorySizeLimit(15). + SetMemorySizeLimit(2*(lengthBytes*2+10)). + SetBlockSize(2*(lengthBytes*2+10)). SetPropSizeDistance(1). SetPropKeysDistance(1). Build(memStore, subDir, "3") @@ -217,7 +220,7 @@ func TestRangeSplitterStrictCase(t *testing.T) { []byte("key03"), []byte("key13"), []byte("key23"), } values3 := [][]byte{ - []byte("value03"), []byte("value13"), []byte("value23"), + []byte("val03"), []byte("val13"), []byte("val23"), } dataFiles123, statFiles123, err := MockExternalEngineWithWriter(memStore, writer3, subDir, keys3, values3) require.NoError(t, err) @@ -234,7 +237,7 @@ func TestRangeSplitterStrictCase(t *testing.T) { // group keys = 2, region keys = 1 splitter, err := NewRangeSplitter( - ctx, dataFiles123, statFiles123, memStore, 1000, 2, 1000, 1, + ctx, dataFiles123, statFiles123, memStore, 1000, 2, 1000, 1, true, ) require.NoError(t, err) @@ -316,7 +319,7 @@ func TestExactlyKeyNum(t *testing.T) { // maxRangeKeys = 3 splitter, err := NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, 100, 1000, 3, + ctx, dataFiles, statFiles, memStore, 1000, 100, 1000, 3, true, ) require.NoError(t, err) endKey, splitDataFiles, splitStatFiles, splitKeys, err := splitter.SplitOneRangesGroup() @@ -328,7 +331,7 @@ func TestExactlyKeyNum(t *testing.T) { // rangesGroupKeys = 3 splitter, err = NewRangeSplitter( - ctx, dataFiles, statFiles, memStore, 1000, 3, 1000, 1, + ctx, dataFiles, statFiles, memStore, 1000, 3, 1000, 1, true, ) require.NoError(t, err) endKey, splitDataFiles, splitStatFiles, splitKeys, err = splitter.SplitOneRangesGroup() diff --git a/br/pkg/lightning/backend/external/util.go b/br/pkg/lightning/backend/external/util.go index c5e9f6df49d3b..20683a3e769af 100644 --- a/br/pkg/lightning/backend/external/util.go +++ b/br/pkg/lightning/backend/external/util.go @@ -22,11 +22,13 @@ import ( "sort" "strings" + "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" + "go.uber.org/zap/zapcore" ) // seekPropsOffsets seeks the statistic files to find the largest offset of @@ -37,12 +39,15 @@ func seekPropsOffsets( start kv.Key, paths []string, exStorage storage.ExternalStorage, -) ([]uint64, 
error) { - iter, err := NewMergePropIter(ctx, paths, exStorage) + checkHotSpot bool, +) (_ []uint64, err error) { + logger := logutil.Logger(ctx) + task := log.BeginTask(logger, "seek props offsets") + defer task.End(zapcore.ErrorLevel, err) + iter, err := NewMergePropIter(ctx, paths, exStorage, checkHotSpot) if err != nil { return nil, err } - logger := logutil.Logger(ctx) defer func() { if err := iter.Close(); err != nil { logger.Warn("failed to close merge prop iterator", zap.Error(err)) @@ -128,7 +133,8 @@ func MockExternalEngine( ) (dataFiles []string, statsFiles []string, err error) { subDir := "/mock-test" writer := NewWriterBuilder(). - SetMemorySizeLimit(128). + SetMemorySizeLimit(10*(lengthBytes*2+10)). + SetBlockSize(10*(lengthBytes*2+10)). SetPropSizeDistance(32). SetPropKeysDistance(4). Build(storage, "/mock-test", "0") @@ -205,41 +211,39 @@ func GetMaxOverlapping(points []Endpoint) int64 { // SortedKVMeta is the meta of sorted kv. type SortedKVMeta struct { - MinKey []byte `json:"min-key"` - MaxKey []byte `json:"max-key"` - TotalKVSize uint64 `json:"total-kv-size"` - // seems those 2 fields always generated from MultipleFilesStats, - // maybe remove them later. - DataFiles []string `json:"data-files"` - StatFiles []string `json:"stat-files"` + StartKey []byte `json:"start-key"` + EndKey []byte `json:"end-key"` // exclusive + TotalKVSize uint64 `json:"total-kv-size"` MultipleFilesStats []MultipleFilesStat `json:"multiple-files-stats"` } -// NewSortedKVMeta creates a SortedKVMeta from a WriterSummary. +// NewSortedKVMeta creates a SortedKVMeta from a WriterSummary. If the summary +// is empty, it will return a pointer to zero SortedKVMeta. func NewSortedKVMeta(summary *WriterSummary) *SortedKVMeta { - meta := &SortedKVMeta{ - MinKey: summary.Min.Clone(), - MaxKey: summary.Max.Clone(), + if summary == nil || (len(summary.Min) == 0 && len(summary.Max) == 0) { + return &SortedKVMeta{} + } + return &SortedKVMeta{ + StartKey: summary.Min.Clone(), + EndKey: summary.Max.Clone().Next(), TotalKVSize: summary.TotalSize, MultipleFilesStats: summary.MultipleFilesStats, } - for _, f := range summary.MultipleFilesStats { - for _, filename := range f.Filenames { - meta.DataFiles = append(meta.DataFiles, filename[0]) - meta.StatFiles = append(meta.StatFiles, filename[1]) - } - } - return meta } // Merge merges the other SortedKVMeta into this one. func (m *SortedKVMeta) Merge(other *SortedKVMeta) { - m.MinKey = NotNilMin(m.MinKey, other.MinKey) - m.MaxKey = NotNilMax(m.MaxKey, other.MaxKey) - m.TotalKVSize += other.TotalKVSize + if len(other.StartKey) == 0 && len(other.EndKey) == 0 { + return + } + if len(m.StartKey) == 0 && len(m.EndKey) == 0 { + *m = *other + return + } - m.DataFiles = append(m.DataFiles, other.DataFiles...) - m.StatFiles = append(m.StatFiles, other.StatFiles...) + m.StartKey = BytesMin(m.StartKey, other.StartKey) + m.EndKey = BytesMax(m.EndKey, other.EndKey) + m.TotalKVSize += other.TotalKVSize m.MultipleFilesStats = append(m.MultipleFilesStats, other.MultipleFilesStats...) } @@ -249,28 +253,38 @@ func (m *SortedKVMeta) MergeSummary(summary *WriterSummary) { m.Merge(NewSortedKVMeta(summary)) } -// NotNilMin returns the smallest of a and b, ignoring nil values. -func NotNilMin(a, b []byte) []byte { - if len(a) == 0 { - return b +// GetDataFiles returns all data files in the meta. 
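The exclusive EndKey introduced above is derived with kv.Key.Next(), which produces the smallest key strictly greater than the inclusive maximum by appending a zero byte. A tiny illustration; next here mimics kv.Key.Next and matches the []byte{'b', 0} expectation in the updated util_test.go:

```go
package main

import "fmt"

// next returns the smallest key strictly greater than k: k + 0x00.
func next(k []byte) []byte {
	return append(append([]byte{}, k...), 0)
}

func main() {
	fmt.Printf("%q\n", next([]byte("b"))) // "b\x00"
}
```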
+func (m *SortedKVMeta) GetDataFiles() []string { + var ret []string + for _, stat := range m.MultipleFilesStats { + for _, files := range stat.Filenames { + ret = append(ret, files[0]) + } } - if len(b) == 0 { - return a + return ret +} + +// GetStatFiles returns all stat files in the meta. +func (m *SortedKVMeta) GetStatFiles() []string { + var ret []string + for _, stat := range m.MultipleFilesStats { + for _, files := range stat.Filenames { + ret = append(ret, files[1]) + } } + return ret +} + +// BytesMin returns the smallest of byte slice a and b. +func BytesMin(a, b []byte) []byte { if bytes.Compare(a, b) < 0 { return a } return b } -// NotNilMax returns the largest of a and b, ignoring nil values. -func NotNilMax(a, b []byte) []byte { - if len(a) == 0 { - return b - } - if len(b) == 0 { - return a - } +// BytesMax returns the largest of byte slice a and b. +func BytesMax(a, b []byte) []byte { if bytes.Compare(a, b) > 0 { return a } diff --git a/br/pkg/lightning/backend/external/util_test.go b/br/pkg/lightning/backend/external/util_test.go index 62a53d8cfb97b..e9c32e5bae2a2 100644 --- a/br/pkg/lightning/backend/external/util_test.go +++ b/br/pkg/lightning/backend/external/util_test.go @@ -70,18 +70,18 @@ func TestSeekPropsOffsets(t *testing.T) { err = w2.Close(ctx) require.NoError(t, err) - got, err := seekPropsOffsets(ctx, []byte("key2.5"), []string{file1, file2}, store) + got, err := seekPropsOffsets(ctx, []byte("key2.5"), []string{file1, file2}, store, true) require.NoError(t, err) require.Equal(t, []uint64{10, 20}, got) - got, err = seekPropsOffsets(ctx, []byte("key3"), []string{file1, file2}, store) + got, err = seekPropsOffsets(ctx, []byte("key3"), []string{file1, file2}, store, true) require.NoError(t, err) require.Equal(t, []uint64{30, 20}, got) - _, err = seekPropsOffsets(ctx, []byte("key0"), []string{file1, file2}, store) + _, err = seekPropsOffsets(ctx, []byte("key0"), []string{file1, file2}, store, true) require.ErrorContains(t, err, "start key 6b657930 is too small for stat files [/test1 /test2]") - got, err = seekPropsOffsets(ctx, []byte("key1"), []string{file1, file2}, store) + got, err = seekPropsOffsets(ctx, []byte("key1"), []string{file1, file2}, store, false) require.NoError(t, err) require.Equal(t, []uint64{10, 0}, got) - got, err = seekPropsOffsets(ctx, []byte("key999"), []string{file1, file2}, store) + got, err = seekPropsOffsets(ctx, []byte("key999"), []string{file1, file2}, store, false) require.NoError(t, err) require.Equal(t, []uint64{50, 40}, got) @@ -98,7 +98,7 @@ func TestSeekPropsOffsets(t *testing.T) { require.NoError(t, err) err = w4.Close(ctx) require.NoError(t, err) - got, err = seekPropsOffsets(ctx, []byte("key3"), []string{file1, file2, file3, file4}, store) + got, err = seekPropsOffsets(ctx, []byte("key3"), []string{file1, file2, file3, file4}, store, true) require.NoError(t, err) require.Equal(t, []uint64{30, 20, 0, 30}, got) } @@ -107,7 +107,8 @@ func TestGetAllFileNames(t *testing.T) { ctx := context.Background() store := storage.NewMemStorage() w := NewWriterBuilder(). - SetMemorySizeLimit(20). + SetMemorySizeLimit(10*(lengthBytes*2+2)). + SetBlockSize(10*(lengthBytes*2+2)). SetPropSizeDistance(5). SetPropKeysDistance(3). Build(store, "/subtask", "0") @@ -127,7 +128,8 @@ func TestGetAllFileNames(t *testing.T) { require.NoError(t, err) w2 := NewWriterBuilder(). - SetMemorySizeLimit(20). + SetMemorySizeLimit(10*(lengthBytes*2+2)). + SetBlockSize(10*(lengthBytes*2+2)). SetPropSizeDistance(5). SetPropKeysDistance(3). 
Build(store, "/subtask", "3") @@ -140,7 +142,8 @@ func TestGetAllFileNames(t *testing.T) { require.NoError(t, err) w3 := NewWriterBuilder(). - SetMemorySizeLimit(20). + SetMemorySizeLimit(10*(lengthBytes*2+2)). + SetBlockSize(10*(lengthBytes*2+2)). SetPropSizeDistance(5). SetPropKeysDistance(3). Build(store, "/subtask", "12") @@ -169,7 +172,8 @@ func TestCleanUpFiles(t *testing.T) { ctx := context.Background() store := storage.NewMemStorage() w := NewWriterBuilder(). - SetMemorySizeLimit(20). + SetMemorySizeLimit(10*(lengthBytes*2+2)). + SetBlockSize(10*(lengthBytes*2+2)). SetPropSizeDistance(5). SetPropKeysDistance(3). Build(store, "/subtask", "0") @@ -264,26 +268,20 @@ func TestSortedKVMeta(t *testing.T) { }, } meta0 := NewSortedKVMeta(summary[0]) - require.Equal(t, []byte("a"), meta0.MinKey) - require.Equal(t, []byte("b"), meta0.MaxKey) + require.Equal(t, []byte("a"), meta0.StartKey) + require.Equal(t, []byte{'b', 0}, meta0.EndKey) require.Equal(t, uint64(123), meta0.TotalKVSize) - require.Equal(t, []string{"f1", "f2"}, meta0.DataFiles) - require.Equal(t, []string{"stat1", "stat2"}, meta0.StatFiles) require.Equal(t, summary[0].MultipleFilesStats, meta0.MultipleFilesStats) meta1 := NewSortedKVMeta(summary[1]) - require.Equal(t, []byte("x"), meta1.MinKey) - require.Equal(t, []byte("y"), meta1.MaxKey) + require.Equal(t, []byte("x"), meta1.StartKey) + require.Equal(t, []byte{'y', 0}, meta1.EndKey) require.Equal(t, uint64(177), meta1.TotalKVSize) - require.Equal(t, []string{"f3", "f4"}, meta1.DataFiles) - require.Equal(t, []string{"stat3", "stat4"}, meta1.StatFiles) require.Equal(t, summary[1].MultipleFilesStats, meta1.MultipleFilesStats) meta0.MergeSummary(summary[1]) - require.Equal(t, []byte("a"), meta0.MinKey) - require.Equal(t, []byte("y"), meta0.MaxKey) + require.Equal(t, []byte("a"), meta0.StartKey) + require.Equal(t, []byte{'y', 0}, meta0.EndKey) require.Equal(t, uint64(300), meta0.TotalKVSize) - require.Equal(t, []string{"f1", "f2", "f3", "f4"}, meta0.DataFiles) - require.Equal(t, []string{"stat1", "stat2", "stat3", "stat4"}, meta0.StatFiles) mergedStats := append([]MultipleFilesStat{}, summary[0].MultipleFilesStats...) mergedStats = append(mergedStats, summary[1].MultipleFilesStats...) 
require.Equal(t, mergedStats, meta0.MultipleFilesStats) @@ -294,21 +292,9 @@ func TestSortedKVMeta(t *testing.T) { } func TestKeyMinMax(t *testing.T) { - require.Equal(t, []byte(nil), NotNilMin(nil, nil)) - require.Equal(t, []byte{}, NotNilMin(nil, []byte{})) - require.Equal(t, []byte(nil), NotNilMin([]byte{}, nil)) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), nil)) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), []byte{})) - require.Equal(t, []byte("a"), NotNilMin(nil, []byte("a"))) - require.Equal(t, []byte("a"), NotNilMin([]byte("a"), []byte("b"))) - require.Equal(t, []byte("a"), NotNilMin([]byte("b"), []byte("a"))) + require.Equal(t, []byte("a"), BytesMin([]byte("a"), []byte("b"))) + require.Equal(t, []byte("a"), BytesMin([]byte("b"), []byte("a"))) - require.Equal(t, []byte(nil), NotNilMax(nil, nil)) - require.Equal(t, []byte{}, NotNilMax(nil, []byte{})) - require.Equal(t, []byte(nil), NotNilMax([]byte{}, nil)) - require.Equal(t, []byte("a"), NotNilMax([]byte("a"), nil)) - require.Equal(t, []byte("a"), NotNilMax([]byte("a"), []byte{})) - require.Equal(t, []byte("a"), NotNilMax(nil, []byte("a"))) - require.Equal(t, []byte("b"), NotNilMax([]byte("a"), []byte("b"))) - require.Equal(t, []byte("b"), NotNilMax([]byte("b"), []byte("a"))) + require.Equal(t, []byte("b"), BytesMax([]byte("a"), []byte("b"))) + require.Equal(t, []byte("b"), BytesMax([]byte("b"), []byte("a"))) } diff --git a/br/pkg/lightning/backend/external/writer.go b/br/pkg/lightning/backend/external/writer.go index 9612e8a3d11f5..f60814338e57c 100644 --- a/br/pkg/lightning/backend/external/writer.go +++ b/br/pkg/lightning/backend/external/writer.go @@ -17,6 +17,7 @@ package external import ( "bytes" "context" + "encoding/binary" "encoding/hex" "path/filepath" "slices" @@ -99,6 +100,7 @@ func dummyOnCloseFunc(*WriterSummary) {} // WriterBuilder builds a new Writer. type WriterBuilder struct { memSizeLimit uint64 + blockSize int writeBatchCount uint64 propSizeDist uint64 propKeysDist uint64 @@ -114,6 +116,7 @@ type WriterBuilder struct { func NewWriterBuilder() *WriterBuilder { return &WriterBuilder{ memSizeLimit: DefaultMemSizeLimit, + blockSize: DefaultBlockSize, writeBatchCount: 8 * 1024, propSizeDist: 1 * size.MB, propKeysDist: 8 * 1024, @@ -174,6 +177,12 @@ func (b *WriterBuilder) SetMutex(mu *sync.Mutex) *WriterBuilder { return b } +// SetBlockSize sets the block size of pre-allocated buf in the writer. +func (b *WriterBuilder) SetBlockSize(blockSize int) *WriterBuilder { + b.blockSize = blockSize + return b +} + // Build builds a new Writer. The files writer will create are under the prefix // of "{prefix}/{writerID}". func (b *WriterBuilder) Build( @@ -199,8 +208,7 @@ func (b *WriterBuilder) Build( }, memSizeLimit: b.memSizeLimit, store: store, - kvBuffer: bp.NewBuffer(), - writeBatch: make([]common.KvPair, 0, b.writeBatchCount), + kvBuffer: newPreAllocKVBuf(b.memSizeLimit, b.blockSize), currentSeq: 0, filenamePrefix: filenamePrefix, keyAdapter: keyAdapter, @@ -266,6 +274,12 @@ func GetMaxOverlappingTotal(stats []MultipleFilesStat) int64 { return GetMaxOverlapping(points) } +type kvLocation struct { + blockIdx int32 + offset int32 + length int32 +} + // Writer is used to write data into external storage. 
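kvLocation above is the writer's new handle to one KV pair: instead of accumulating common.KvPair copies, WriteRow (next hunk) packs each record into a pre-allocated block buffer and only remembers where it landed. The record layout is an 8-byte big-endian key length, the encoded key, an 8-byte value length, then the value; the binary.BigEndian.Uint64 calls imply lengthBytes is 8. A sketch of that layout with illustrative helper names (encodeKV/keyOf are not in the PR):

    package sketch

    import "encoding/binary"

    const lengthBytes = 8 // one big-endian uint64 length prefix

    // encodeKV packs |keyLen|key|valLen|val| into buf, which must hold
    // lengthBytes*2+len(key)+len(val) bytes (the "length" stored in kvLocation).
    func encodeKV(buf, key, val []byte) {
        binary.BigEndian.PutUint64(buf, uint64(len(key)))
        copy(buf[lengthBytes:], key)
        binary.BigEndian.PutUint64(buf[lengthBytes+len(key):], uint64(len(val)))
        copy(buf[lengthBytes*2+len(key):], val)
    }

    // keyOf mirrors Writer.getKeyByLoc in the writer.go hunk below: read the
    // length prefix, then slice the key straight out of the block.
    func keyOf(buf []byte) []byte {
        keyLen := binary.BigEndian.Uint64(buf[:lengthBytes])
        return buf[lengthBytes : lengthBytes+keyLen]
    }

The 16 bytes of prefixes per record are also why the tests above stopped using round numbers like SetMemorySizeLimit(15): an expression such as 2*(lengthBytes*2+10) sizes the buffer for exactly two records of 10 payload bytes, preserving the flush-every-N-pairs behavior the assertions depend on.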
type Writer struct { store storage.ExternalStorage @@ -279,8 +293,9 @@ type Writer struct { memSizeLimit uint64 - kvBuffer *membuf.Buffer - writeBatch []common.KvPair + kvBuffer *preAllocKVBuf + kvLocations []kvLocation + kvSize int64 onClose OnCloseFunc closed bool @@ -304,22 +319,36 @@ type Writer struct { // WriteRow implements ingest.Writer. func (w *Writer) WriteRow(ctx context.Context, idxKey, idxVal []byte, handle tidbkv.Handle) error { keyAdapter := w.keyAdapter - w.batchSize += uint64(len(idxKey) + len(idxVal)) var rowID []byte if handle != nil { rowID = handle.Encoded() } - buf := w.kvBuffer.AllocBytes(keyAdapter.EncodedLen(idxKey, rowID)) - key := keyAdapter.Encode(buf[:0], idxKey, rowID) - val := w.kvBuffer.AddBytes(idxVal) - - w.writeBatch = append(w.writeBatch, common.KvPair{Key: key, Val: val}) - if w.batchSize >= w.memSizeLimit { + encodedKeyLen := keyAdapter.EncodedLen(idxKey, rowID) + length := encodedKeyLen + len(idxVal) + lengthBytes*2 + blockIdx, dataBuf, off, allocated := w.kvBuffer.Alloc(length) + if !allocated { if err := w.flushKVs(ctx, false); err != nil { return err } + blockIdx, dataBuf, off, allocated = w.kvBuffer.Alloc(length) + // we now don't support KV larger than blockSize + if !allocated { + return errors.Errorf("failed to allocate kv buffer: %d", length) + } } + binary.BigEndian.AppendUint64(dataBuf[:0], uint64(encodedKeyLen)) + keyAdapter.Encode(dataBuf[lengthBytes:lengthBytes:lengthBytes+encodedKeyLen], idxKey, rowID) + binary.BigEndian.AppendUint64(dataBuf[lengthBytes+encodedKeyLen:lengthBytes+encodedKeyLen], uint64(len(idxVal))) + copy(dataBuf[lengthBytes*2+encodedKeyLen:], idxVal) + + w.kvLocations = append(w.kvLocations, kvLocation{ + blockIdx: blockIdx, + offset: off, + length: int32(length)}, + ) + w.kvSize += int64(encodedKeyLen + len(idxVal)) + w.batchSize += uint64(length) return nil } @@ -336,7 +365,7 @@ func (w *Writer) Close(ctx context.Context) error { return errors.Errorf("writer %s has been closed", w.writerID) } w.closed = true - defer w.kvBuffer.Destroy() + defer w.kvBuffer.destroy() err := w.flushKVs(ctx, true) if err != nil { return err @@ -346,10 +375,11 @@ func (w *Writer) Close(ctx context.Context) error { logutil.Logger(ctx).Info("close writer", zap.String("writerID", w.writerID), + zap.Int("kv-cnt-cap", cap(w.kvLocations)), zap.String("minKey", hex.EncodeToString(w.minKey)), zap.String("maxKey", hex.EncodeToString(w.maxKey))) - w.writeBatch = nil + w.kvLocations = nil w.onClose(&WriterSummary{ WriterID: w.writerID, @@ -373,7 +403,7 @@ func (w *Writer) recordMinMax(newMin, newMax tidbkv.Key, size uint64) { } func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { - if len(w.writeBatch) == 0 { + if len(w.kvLocations) == 0 { return nil } if w.shareMu != nil { @@ -405,7 +435,7 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { } return units.HumanSize(float64(n) / dur) } - kvCnt := len(w.writeBatch) + kvCnt := len(w.kvLocations) defer func() { w.currentSeq++ err1, err2 := dataWriter.Close(ctx), statWriter.Close(ctx) @@ -442,18 +472,19 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { sortStart := time.Now() if w.shareMu != nil { sorty.MaxGor = min(8, uint64(variable.GetDDLReorgWorkerCounter())) - sorty.Sort(len(w.writeBatch), func(i, j, r, s int) bool { - if bytes.Compare(w.writeBatch[i].Key, w.writeBatch[j].Key) < 0 { + sorty.Sort(len(w.kvLocations), func(i, j, r, s int) bool { + posi, posj := w.kvLocations[i], w.kvLocations[j] + if 
bytes.Compare(w.getKeyByLoc(posi), w.getKeyByLoc(posj)) < 0 { if r != s { - w.writeBatch[r], w.writeBatch[s] = w.writeBatch[s], w.writeBatch[r] + w.kvLocations[r], w.kvLocations[s] = w.kvLocations[s], w.kvLocations[r] } return true } return false }) } else { - slices.SortFunc(w.writeBatch[:], func(i, j common.KvPair) int { - return bytes.Compare(i.Key, j.Key) + slices.SortFunc(w.kvLocations, func(i, j kvLocation) int { + return bytes.Compare(w.getKeyByLoc(i), w.getKeyByLoc(j)) }) } sortDuration = time.Since(sortStart) @@ -466,13 +497,11 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { return err } - var kvSize uint64 - for _, pair := range w.writeBatch { - err = w.kvStore.AddKeyValue(pair.Key, pair.Val) + for _, pair := range w.kvLocations { + err = w.kvStore.addEncodedData(w.getEncodedKVData(pair)) if err != nil { return err } - kvSize += uint64(len(pair.Key)) + uint64(len(pair.Val)) } w.kvStore.Close() @@ -483,7 +512,8 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { return err } - w.recordMinMax(w.writeBatch[0].Key, w.writeBatch[len(w.writeBatch)-1].Key, kvSize) + minKey, maxKey := w.getKeyByLoc(w.kvLocations[0]), w.getKeyByLoc(w.kvLocations[len(w.kvLocations)-1]) + w.recordMinMax(minKey, maxKey, uint64(w.kvSize)) // maintain 500-batch statistics @@ -491,8 +521,8 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { w.multiFileStats[l-1].Filenames = append(w.multiFileStats[l-1].Filenames, [2]string{dataFile, statFile}, ) - w.fileMinKeys = append(w.fileMinKeys, tidbkv.Key(w.writeBatch[0].Key).Clone()) - w.fileMaxKeys = append(w.fileMaxKeys, tidbkv.Key(w.writeBatch[len(w.writeBatch)-1].Key).Clone()) + w.fileMinKeys = append(w.fileMinKeys, tidbkv.Key(minKey).Clone()) + w.fileMaxKeys = append(w.fileMaxKeys, tidbkv.Key(maxKey).Clone()) if fromClose || len(w.multiFileStats[l-1].Filenames) == multiFileStatNum { w.multiFileStats[l-1].build(w.fileMinKeys, w.fileMaxKeys) w.multiFileStats = append(w.multiFileStats, MultipleFilesStat{ @@ -502,13 +532,25 @@ func (w *Writer) flushKVs(ctx context.Context, fromClose bool) (err error) { w.fileMaxKeys = w.fileMaxKeys[:0] } - w.writeBatch = w.writeBatch[:0] + w.kvLocations = w.kvLocations[:0] + w.kvSize = 0 + w.kvBuffer.reset() w.rc.reset() - w.kvBuffer.Reset() w.batchSize = 0 return nil } +func (w *Writer) getEncodedKVData(pos kvLocation) []byte { + block := w.kvBuffer.blocks[pos.blockIdx] + return block[pos.offset : pos.offset+pos.length] +} + +func (w *Writer) getKeyByLoc(pos kvLocation) []byte { + block := w.kvBuffer.blocks[pos.blockIdx] + keyLen := binary.BigEndian.Uint64(block[pos.offset : pos.offset+lengthBytes]) + return block[pos.offset+lengthBytes : uint64(pos.offset)+lengthBytes+keyLen] +} + func (w *Writer) createStorageWriter(ctx context.Context) ( dataFile, statFile string, data, stats storage.ExternalFileWriter, diff --git a/br/pkg/lightning/backend/external/writer_test.go b/br/pkg/lightning/backend/external/writer_test.go index b3ea9d97887e9..716c10067901b 100644 --- a/br/pkg/lightning/backend/external/writer_test.go +++ b/br/pkg/lightning/backend/external/writer_test.go @@ -110,9 +110,11 @@ func TestWriterFlushMultiFileNames(t *testing.T) { writer := NewWriterBuilder(). SetPropKeysDistance(2). - SetMemorySizeLimit(60). + SetMemorySizeLimit(3*(lengthBytes*2+20)). + SetBlockSize(3*(lengthBytes*2+20)). Build(memStore, "/test", "0") + require.Equal(t, 3*(lengthBytes*2+20), writer.kvBuffer.blockSize) // 200 bytes key values. 
kvCnt := 10 kvs := make([]common.KvPair, kvCnt) @@ -173,7 +175,7 @@ func TestWriterDuplicateDetect(t *testing.T) { require.NoError(t, err) // test MergeOverlappingFiles will not change duplicate detection functionality. - err = MergeOverlappingFiles( + err = mergeOverlappingFilesImpl( ctx, []string{"/test/0/0"}, memStore, @@ -181,10 +183,12 @@ func TestWriterDuplicateDetect(t *testing.T) { "/test2", "mergeID", 1000, + 1000, 8*1024, 1*size.MB, 2, nil, + false, ) require.NoError(t, err) @@ -269,7 +273,8 @@ func TestWriterMultiFileStat(t *testing.T) { writer := NewWriterBuilder(). SetPropKeysDistance(2). - SetMemorySizeLimit(20). + SetMemorySizeLimit(52). + SetBlockSize(52). // 2 KV pair will trigger flush SetOnCloseFunc(closeFn). Build(memStore, "/test", "0") @@ -368,18 +373,20 @@ func TestWriterMultiFileStat(t *testing.T) { allDataFiles[i] = fmt.Sprintf("/test/0/%d", i) } - err = MergeOverlappingFiles( + err = mergeOverlappingFilesImpl( ctx, allDataFiles, memStore, 100, "/test2", "mergeID", - 20, + 52, + 52, 8*1024, 1*size.MB, 2, closeFn, + true, ) require.NoError(t, err) require.Equal(t, 3, len(summary.MultipleFilesStats)) diff --git a/br/pkg/lightning/backend/kv/session.go b/br/pkg/lightning/backend/kv/session.go index 727e4de60ef0a..d6b9add03273c 100644 --- a/br/pkg/lightning/backend/kv/session.go +++ b/br/pkg/lightning/backend/kv/session.go @@ -288,11 +288,12 @@ func NewSession(options *encode.SessionOptions, logger log.Logger) *Session { vars.StmtCtx.BatchCheck = true vars.StmtCtx.BadNullAsWarning = !sqlMode.HasStrictMode() vars.StmtCtx.OverflowAsWarning = !sqlMode.HasStrictMode() - vars.StmtCtx.AllowInvalidDate = sqlMode.HasAllowInvalidDatesMode() - vars.StmtCtx.IgnoreZeroInDate = !sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() vars.SQLMode = sqlMode - typeFlags := vars.StmtCtx.TypeFlags().WithTruncateAsWarning(!sqlMode.HasStrictMode()) + typeFlags := vars.StmtCtx.TypeFlags(). + WithTruncateAsWarning(!sqlMode.HasStrictMode()). + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode()) vars.StmtCtx.SetTypeFlags(typeFlags) if options.SysVars != nil { for k, v := range options.SysVars { @@ -314,9 +315,7 @@ func NewSession(options *encode.SessionOptions, logger log.Logger) *Session { } } vars.StmtCtx.SetTimeZone(vars.Location()) - vars.StmtCtx.SetTypeFlags(types.StrictFlags. - WithClipNegativeToZero(true), - ) + vars.StmtCtx.SetTypeFlags(types.StrictFlags) if err := vars.SetSystemVar("timestamp", strconv.FormatInt(options.Timestamp, 10)); err != nil { logger.Warn("new session: failed to set timestamp", log.ShortError(err)) diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go index 986e6cff57d3b..61ae643793c03 100644 --- a/br/pkg/lightning/backend/local/engine.go +++ b/br/pkg/lightning/backend/local/engine.go @@ -291,8 +291,12 @@ func (e *Engine) ID() string { } // GetKeyRange implements common.Engine. -func (e *Engine) GetKeyRange() (firstKey []byte, lastKey []byte, err error) { - return e.GetFirstAndLastKey(nil, nil) +func (e *Engine) GetKeyRange() (startKey []byte, endKey []byte, err error) { + firstKey, lastKey, err := e.GetFirstAndLastKey(nil, nil) + if err != nil { + return nil, nil, errors.Trace(err) + } + return firstKey, nextKey(lastKey), nil } // SplitRanges gets size properties from pebble and split ranges according to size/keys limit.
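One more point on the flushKVs hunks in writer.go above: both sort paths now order the slice of small fixed-size kvLocation records and compare keys through getKeyByLoc, so the KV payloads never move during sorting. The same indirection in miniature, using only the standard library:

    package main

    import (
        "bytes"
        "fmt"
        "slices"
    )

    type loc struct{ block, off, length int32 }

    func main() {
        blocks := [][]byte{[]byte("bbbaaaccc")} // three 3-byte keys packed into one block
        locs := []loc{{0, 0, 3}, {0, 3, 3}, {0, 6, 3}}
        keyOf := func(l loc) []byte { return blocks[l.block][l.off : l.off+l.length] }

        // sorting shuffles only the 12-byte location records, never the payload
        slices.SortFunc(locs, func(i, j loc) int { return bytes.Compare(keyOf(i), keyOf(j)) })
        for _, l := range locs {
            fmt.Print(string(keyOf(l)), " ") // aaa bbb ccc
        }
    }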
@@ -1001,6 +1005,9 @@ func (e *Engine) GetFirstAndLastKey(lowerBound, upperBound []byte) ([]byte, []by LowerBound: lowerBound, UpperBound: upperBound, } + failpoint.Inject("mockGetFirstAndLastKey", func() { + failpoint.Return(lowerBound, upperBound, nil) + }) iter := e.newKVIter(context.Background(), opt) //nolint: errcheck diff --git a/br/pkg/lightning/backend/local/local.go b/br/pkg/lightning/backend/local/local.go index 0f8610c3aebd5..575c25e2488b8 100644 --- a/br/pkg/lightning/backend/local/local.go +++ b/br/pkg/lightning/backend/local/local.go @@ -961,17 +961,19 @@ func (local *Backend) CloseEngine(ctx context.Context, cfg *backend.EngineConfig store, externalCfg.DataFiles, externalCfg.StatFiles, - externalCfg.MinKey, - externalCfg.MaxKey, + externalCfg.StartKey, + externalCfg.EndKey, externalCfg.SplitKeys, externalCfg.RegionSplitSize, local.keyAdapter, local.DupeDetectEnabled, local.duplicateDB, local.DuplicateDetectOpt, + local.WorkerConcurrency, ts, externalCfg.TotalFileSize, externalCfg.TotalKVCount, + externalCfg.CheckHotspot, ) local.externalEngine[engineUUID] = externalEngine return nil @@ -1073,28 +1075,26 @@ func readAndSplitIntoRange( sizeLimit int64, keysLimit int64, ) ([]common.Range, error) { - firstKey, lastKey, err := engine.GetKeyRange() + startKey, endKey, err := engine.GetKeyRange() if err != nil { return nil, err } - if firstKey == nil { + if startKey == nil { return nil, errors.New("could not find first pair") } - endKey := nextKey(lastKey) - engineFileTotalSize, engineFileLength := engine.KVStatistics() if engineFileTotalSize <= sizeLimit && engineFileLength <= keysLimit { - ranges := []common.Range{{Start: firstKey, End: endKey}} + ranges := []common.Range{{Start: startKey, End: endKey}} return ranges, nil } logger := log.FromContext(ctx).With(zap.String("engine", engine.ID())) - ranges, err := engine.SplitRanges(firstKey, endKey, sizeLimit, keysLimit, logger) + ranges, err := engine.SplitRanges(startKey, endKey, sizeLimit, keysLimit, logger) logger.Info("split engine key ranges", zap.Int64("totalSize", engineFileTotalSize), zap.Int64("totalCount", engineFileLength), - logutil.Key("firstKey", firstKey), logutil.Key("lastKey", lastKey), + logutil.Key("startKey", startKey), logutil.Key("endKey", endKey), zap.Int("ranges", len(ranges)), zap.Error(err)) return ranges, err } @@ -1201,6 +1201,12 @@ func (local *Backend) generateAndSendJob( } failpoint.Inject("beforeGenerateJob", nil) + failpoint.Inject("sendDummyJob", func(_ failpoint.Value) { + // this is used to trigger worker failure, used together + // with WriteToTiKVNotEnoughDiskSpace + jobToWorkerCh <- ®ionJob{} + time.Sleep(5 * time.Second) + }) jobs, err := local.generateJobForRange(egCtx, p.Data, p.Range, regionSplitSize, regionSplitKeys) if err != nil { if common.IsContextCanceledError(err) { @@ -1434,7 +1440,6 @@ func (local *Backend) executeJob( // if it's retryable error, we retry from scanning region log.FromContext(ctx).Warn("meet retryable error when writing to TiKV", log.ShortError(err), zap.Stringer("job stage", job.stage)) - job.convertStageTo(needRescan) job.lastRetryableErr = err return nil } @@ -1680,29 +1685,30 @@ func (local *Backend) doImport(ctx context.Context, engine common.Engine, region failpoint.Label("afterStartWorker") - err := local.prepareAndSendJob( - workerCtx, - engine, - regionRanges, - regionSplitSize, - regionSplitKeys, - jobToWorkerCh, - &jobWg, - ) - if err != nil { - firstErr.Set(err) + workGroup.Go(func() error { + err := local.prepareAndSendJob( + workerCtx, + engine, + 
regionRanges, + regionSplitSize, + regionSplitKeys, + jobToWorkerCh, + &jobWg, + ) + if err != nil { + return err + } + + jobWg.Wait() workerCancel() - err2 := workGroup.Wait() - if !common.IsContextCanceledError(err2) { - log.FromContext(ctx).Error("worker meets error", zap.Error(err2)) + return nil + }) + if err := workGroup.Wait(); err != nil { + if !common.IsContextCanceledError(err) { + log.FromContext(ctx).Error("do import meets error", zap.Error(err)) } - return firstErr.Get() + firstErr.Set(err) } - - jobWg.Wait() - workerCancel() - firstErr.Set(workGroup.Wait()) - firstErr.Set(ctx.Err()) return firstErr.Get() } diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go index e86fa421bc531..93afa912de596 100644 --- a/br/pkg/lightning/backend/local/local_test.go +++ b/br/pkg/lightning/backend/local/local_test.go @@ -1788,9 +1788,11 @@ func TestSplitRangeAgain4BigRegionExternalEngine(t *testing.T) { false, nil, common.DupDetectOpt{}, + 10, 123, 456, 789, + true, ) jobCh := make(chan *regionJob, 10) @@ -2224,6 +2226,43 @@ func TestCtxCancelIsIgnored(t *testing.T) { require.ErrorContains(t, err, "the remaining storage capacity of TiKV") } +func TestWorkerFailedWhenGeneratingJobs(t *testing.T) { + backup := maxRetryBackoffSecond + maxRetryBackoffSecond = 1 + t.Cleanup(func() { + maxRetryBackoffSecond = backup + }) + + _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter", "return()") + _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/sendDummyJob", "return()") + _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/mockGetFirstAndLastKey", "return()") + _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace", "return()") + t.Cleanup(func() { + _ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter") + _ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/sendDummyJob") + _ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/mockGetFirstAndLastKey") + _ = failpoint.Disable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/WriteToTiKVNotEnoughDiskSpace") + }) + + initRanges := []common.Range{ + {Start: []byte{'c'}, End: []byte{'d'}}, + } + + ctx := context.Background() + l := &Backend{ + BackendConfig: BackendConfig{ + WorkerConcurrency: 1, + }, + splitCli: initTestSplitClient( + [][]byte{{1}, {11}}, + panicSplitRegionClient{}, + ), + } + e := &Engine{} + err := l.doImport(ctx, e, initRanges, int64(config.SplitRegionSize), int64(config.SplitRegionKeys)) + require.ErrorContains(t, err, "the remaining storage capacity of TiKV") +} + func TestExternalEngine(t *testing.T) { _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipSplitAndScatter", "return()") _ = failpoint.Enable("github.com/pingcap/tidb/br/pkg/lightning/backend/local/skipStartWorker", "return()") @@ -2257,8 +2296,8 @@ func TestExternalEngine(t *testing.T) { StorageURI: storageURI, DataFiles: dataFiles, StatFiles: statFiles, - MinKey: keys[0], - MaxKey: keys[99], + StartKey: keys[0], + EndKey: endKey, SplitKeys: [][]byte{keys[30], keys[60], keys[90]}, TotalFileSize: int64(config.SplitRegionSize) + 1, TotalKVCount: int64(config.SplitRegionKeys) + 1, diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go index 80d0980b05e75..0ee5f9b9a6fca 100644 --- 
a/br/pkg/lightning/backend/local/localhelper_test.go +++ b/br/pkg/lightning/backend/local/localhelper_test.go @@ -252,6 +252,11 @@ func (c *testSplitClient) GetOperator(ctx context.Context, regionID uint64) (*pd func (c *testSplitClient) ScanRegions(ctx context.Context, key, endKey []byte, limit int) ([]*split.RegionInfo, error) { c.mu.Lock() defer c.mu.Unlock() + + if err := ctx.Err(); err != nil { + return nil, err + } + if c.hook != nil { key, endKey, limit = c.hook.BeforeScanRegions(ctx, key, endKey, limit) } diff --git a/br/pkg/lightning/backend/local/region_job.go b/br/pkg/lightning/backend/local/region_job.go index 3d50fa32d5a20..210a0b1ba69fd 100644 --- a/br/pkg/lightning/backend/local/region_job.go +++ b/br/pkg/lightning/backend/local/region_job.go @@ -18,6 +18,7 @@ import ( "container/heap" "context" "fmt" + "io" "strings" "sync" "time" @@ -179,12 +180,29 @@ func (j *regionJob) done(wg *sync.WaitGroup) { } // writeToTiKV writes the data to TiKV and mark this job as wrote stage. -// if any write logic has error, writeToTiKV will set job to a proper stage and return nil. TODO: <-check this +// if any write logic has error, writeToTiKV will set job to a proper stage and return nil. // if any underlying logic has error, writeToTiKV will return an error. // we don't need to do cleanup for the pairs written to tikv if encounters an error, // tikv will take the responsibility to do so. // TODO: let client-go provide a high-level write interface. func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error { + err := local.doWrite(ctx, j) + if err == nil { + return nil + } + if !common.IsRetryableError(err) { + return err + } + // currently only one case will restart write + if strings.Contains(err.Error(), "RequestTooNew") { + j.convertStageTo(regionScanned) + return err + } + j.convertStageTo(needRescan) + return err +} + +func (local *Backend) doWrite(ctx context.Context, j *regionJob) error { if j.stage != regionScanned { return nil } @@ -238,9 +256,25 @@ func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error { ApiVersion: apiVersion, } - annotateErr := func(in error, peer *metapb.Peer) error { + failpoint.Inject("changeEpochVersion", func(val failpoint.Value) { + cloned := *meta.RegionEpoch + meta.RegionEpoch = &cloned + i := val.(int) + if i >= 0 { + meta.RegionEpoch.Version += uint64(i) + } else { + meta.RegionEpoch.ConfVer -= uint64(-i) + } + }) + + annotateErr := func(in error, peer *metapb.Peer, msg string) error { // annotate the error with peer/store/region info to help debug. 
- return errors.Annotatef(in, "peer %d, store %d, region %d, epoch %s", peer.Id, peer.StoreId, region.Id, region.RegionEpoch.String()) + return errors.Annotatef( + in, + "peer %d, store %d, region %d, epoch %s, %s", + peer.Id, peer.StoreId, region.Id, region.RegionEpoch.String(), + msg, + ) } leaderID := j.region.Leader.GetId() @@ -260,17 +294,17 @@ func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error { for _, peer := range region.GetPeers() { cli, err := clientFactory.Create(ctx, peer.StoreId) if err != nil { - return annotateErr(err, peer) + return annotateErr(err, peer, "when create client") } wstream, err := cli.Write(ctx) if err != nil { - return annotateErr(err, peer) + return annotateErr(err, peer, "when open write stream") } // Bind uuid for this write request if err = wstream.Send(req); err != nil { - return annotateErr(err, peer) + return annotateErr(err, peer, "when send meta") } clients = append(clients, wstream) allPeers = append(allPeers, peer) @@ -309,7 +343,12 @@ func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error { return errors.Trace(err) } if err := clients[i].SendMsg(preparedMsg); err != nil { - return annotateErr(err, allPeers[i]) + if err == io.EOF { + // if it's EOF, need RecvMsg to get the error + dummy := &sst.WriteResponse{} + err = clients[i].RecvMsg(dummy) + } + return annotateErr(err, allPeers[i], "when send data") } } failpoint.Inject("afterFlushKVs", func() { @@ -383,10 +422,10 @@ func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error { for i, wStream := range clients { resp, closeErr := wStream.CloseAndRecv() if closeErr != nil { - return annotateErr(closeErr, allPeers[i]) + return annotateErr(closeErr, allPeers[i], "when close write stream") } if resp.Error != nil { - return annotateErr(errors.New(resp.Error.Message), allPeers[i]) + return annotateErr(errors.New("resp error: "+resp.Error.Message), allPeers[i], "when close write stream") } if leaderID == region.Peers[i].GetId() { leaderPeerMetas = resp.Metas diff --git a/br/pkg/lightning/backend/tidb/tidb.go b/br/pkg/lightning/backend/tidb/tidb.go index 4109c27bc834f..1108808197c4f 100644 --- a/br/pkg/lightning/backend/tidb/tidb.go +++ b/br/pkg/lightning/backend/tidb/tidb.go @@ -455,7 +455,7 @@ func (enc *tidbEncoder) appendSQL(sb *strings.Builder, datum *types.Datum, _ *ta case types.KindMysqlBit: var buffer [20]byte - intValue, err := datum.GetBinaryLiteral().ToInt(types.DefaultNoWarningContext) + intValue, err := datum.GetBinaryLiteral().ToInt(types.DefaultStmtNoWarningContext) if err != nil { return err } diff --git a/br/pkg/lightning/common/BUILD.bazel b/br/pkg/lightning/common/BUILD.bazel index 725c79ae1f337..e654a11916ad6 100644 --- a/br/pkg/lightning/common/BUILD.bazel +++ b/br/pkg/lightning/common/BUILD.bazel @@ -129,6 +129,7 @@ go_test( "//pkg/testkit/testsetup", "//pkg/util/dbutil", "//pkg/util/mock", + "//pkg/util/pdapi", "@com_github_data_dog_go_sqlmock//:go-sqlmock", "@com_github_go_sql_driver_mysql//:mysql", "@com_github_pingcap_errors//:errors", diff --git a/br/pkg/lightning/common/engine.go b/br/pkg/lightning/common/engine.go index 136e4edd0aa86..559f0058e37ab 100644 --- a/br/pkg/lightning/common/engine.go +++ b/br/pkg/lightning/common/engine.go @@ -38,8 +38,8 @@ type Engine interface { KVStatistics() (totalKVSize int64, totalKVCount int64) // ImportedStatistics returns the imported kv size and imported kv count. 
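Two details in the region_job.go changes above are worth a gloss. First, the old writeToTiKV is now a thin classifier around doWrite, so every failure is routed to the stage it should be retried from in one place. Condensed restatement of the hunk (no new behavior):

    func (local *Backend) writeToTiKV(ctx context.Context, j *regionJob) error {
        err := local.doWrite(ctx, j)
        switch {
        case err == nil:
            return nil
        case !common.IsRetryableError(err):
            return err // fatal, surfaces unchanged
        case strings.Contains(err.Error(), "RequestTooNew"):
            // the job carries a newer region epoch than the store has applied,
            // so the same region can be written again without a rescan
            j.convertStageTo(regionScanned)
        default:
            // the region meta may be stale; rescan before retrying
            j.convertStageTo(needRescan)
        }
        return err
    }

Second, the io.EOF handling on SendMsg follows grpc-go's stream contract: a bare io.EOF from SendMsg means the server already terminated the stream, and the authoritative error only comes out of a subsequent RecvMsg, which is exactly what the dummy *sst.WriteResponse read above does. In isolation (a sketch over the generic grpc.ClientStream interface, not the sst client):

    // imports: "io", "google.golang.org/grpc"
    func sendOrSurface(stream grpc.ClientStream, msg, resp any) error {
        err := stream.SendMsg(msg)
        if err == io.EOF {
            err = stream.RecvMsg(resp) // reports why the server closed the stream
        }
        return err
    }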
ImportedStatistics() (importedKVSize int64, importedKVCount int64) - // GetKeyRange returns the key range of the engine. Both are inclusive. - GetKeyRange() (firstKey []byte, lastKey []byte, err error) + // GetKeyRange returns the key range [startKey, endKey) of the engine. + GetKeyRange() (startKey []byte, endKey []byte, err error) // SplitRanges splits the range [startKey, endKey) into multiple ranges. SplitRanges(startKey, endKey []byte, sizeLimit, keysLimit int64, logger log.Logger) ([]Range, error) Close() error diff --git a/br/pkg/lightning/common/retry.go b/br/pkg/lightning/common/retry.go index 19afcd2f90c46..789c4f2d20a32 100644 --- a/br/pkg/lightning/common/retry.go +++ b/br/pkg/lightning/common/retry.go @@ -79,6 +79,8 @@ var retryableErrorIDs = map[errors.ErrorID]struct{}{ ErrKVReadIndexNotReady.ID(): {}, ErrKVIngestFailed.ID(): {}, ErrKVRaftProposalDropped.ID(): {}, + // litBackendCtxMgr.Register may return the error. + ErrCreatePDClient.ID(): {}, // during checksum coprocessor will transform error into driver error in handleCopResponse using ToTiDBErr // met ErrRegionUnavailable on free-tier import during checksum, others hasn't met yet drivererr.ErrRegionUnavailable.ID(): {}, @@ -103,7 +105,9 @@ func isSingleRetryableError(err error) bool { if nerr.Timeout() { return true } - if syscallErr, ok := goerrors.Unwrap(err).(*os.SyscallError); ok { + // the error might be nested, such as *url.Error -> *net.OpError -> *os.SyscallError + var syscallErr *os.SyscallError + if goerrors.As(nerr, &syscallErr) { return syscallErr.Err == syscall.ECONNREFUSED || syscallErr.Err == syscall.ECONNRESET } return false @@ -138,6 +142,8 @@ func isSingleRetryableError(err error) bool { // 2. in write TiKV: rpc error: code = Unknown desc = EngineTraits(Engine(Status { code: IoError, sub_code: // None, sev: NoError, state: \"IO error: No such file or directory: while stat a file for size: // /...../63992d9c-fbc8-4708-b963-32495b299027_32279707_325_5280_write.sst: No such file or directory\" + // 3. 
in write TiKV: rpc error: code = Unknown desc = Engine("request region 26 is staler than local region, + // local epoch conf_ver: 5 version: 65, request epoch conf_ver: 5 version: 64, please rescan region later") return true default: return false diff --git a/br/pkg/lightning/common/retry_test.go b/br/pkg/lightning/common/retry_test.go index 939f4bb956942..114e500b3334c 100644 --- a/br/pkg/lightning/common/retry_test.go +++ b/br/pkg/lightning/common/retry_test.go @@ -19,6 +19,7 @@ import ( "fmt" "io" "net" + "net/url" "testing" "github.com/go-sql-driver/mysql" @@ -66,6 +67,9 @@ func TestIsRetryableError(t *testing.T) { _, err := net.Dial("tcp", "localhost:65533") require.Error(t, err) require.True(t, IsRetryableError(err)) + // wrap net.OpErr inside url.Error + urlErr := &url.Error{Op: "post", Err: err} + require.True(t, IsRetryableError(urlErr)) // MySQL Errors require.False(t, IsRetryableError(&mysql.MySQLError{})) diff --git a/br/pkg/lightning/common/security_test.go b/br/pkg/lightning/common/security_test.go index 4ba9825efc883..4b4e86c54006d 100644 --- a/br/pkg/lightning/common/security_test.go +++ b/br/pkg/lightning/common/security_test.go @@ -16,6 +16,7 @@ package common_test import ( "context" + "fmt" "io" "net/http" "net/http/httptest" @@ -25,6 +26,7 @@ import ( "testing" "github.com/pingcap/tidb/br/pkg/lightning/common" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/stretchr/testify/require" ) @@ -92,8 +94,8 @@ func TestWithHost(t *testing.T) { false, }, { - "http://127.0.0.1:2379/pd/api/v1/stores", - "127.0.0.1:2379/pd/api/v1/stores", + fmt.Sprintf("http://127.0.0.1:2379%s", pdapi.Stores), + fmt.Sprintf("127.0.0.1:2379%s", pdapi.Stores), false, }, { diff --git a/br/pkg/lightning/importer/BUILD.bazel b/br/pkg/lightning/importer/BUILD.bazel index d9562bf523b44..f15d462919d4a 100644 --- a/br/pkg/lightning/importer/BUILD.bazel +++ b/br/pkg/lightning/importer/BUILD.bazel @@ -73,6 +73,7 @@ go_library( "//pkg/util/engine", "//pkg/util/extsort", "//pkg/util/mock", + "//pkg/util/pdapi", "//pkg/util/regexpr-router", "//pkg/util/set", "@com_github_coreos_go_semver//semver", @@ -161,6 +162,7 @@ go_test( "//pkg/util/dbutil", "//pkg/util/extsort", "//pkg/util/mock", + "//pkg/util/pdapi", "//pkg/util/promutil", "//pkg/util/table-filter", "//pkg/util/table-router", diff --git a/br/pkg/lightning/importer/check_info.go b/br/pkg/lightning/importer/check_info.go index 1dd5f784409e0..8a9a5a1103c00 100644 --- a/br/pkg/lightning/importer/check_info.go +++ b/br/pkg/lightning/importer/check_info.go @@ -26,10 +26,6 @@ import ( ) const ( - pdStores = "/pd/api/v1/stores" - pdReplicate = "/pd/api/v1/config/replicate" - pdEmptyRegions = "/pd/api/v1/regions/check/empty-region" - defaultCSVSize = 10 * units.GiB maxSampleDataSize = 10 * 1024 * 1024 maxSampleRowCount = 10 * 1024 diff --git a/br/pkg/lightning/importer/get_pre_info.go b/br/pkg/lightning/importer/get_pre_info.go index d36bd39b4f937..2e1f3cb980f4e 100644 --- a/br/pkg/lightning/importer/get_pre_info.go +++ b/br/pkg/lightning/importer/get_pre_info.go @@ -50,6 +50,7 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/mock" + "github.com/pingcap/tidb/pkg/util/pdapi" pd "github.com/tikv/pd/client" "go.uber.org/zap" "golang.org/x/exp/maps" @@ -236,7 +237,7 @@ func (g *TargetInfoGetterImpl) GetTargetSysVariablesForImport(ctx context.Contex // It uses the PD interface through TLS to get the information. 
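The switch to goerrors.As in retry.go above matters because the interesting syscall error is often buried more than one layer deep, and a single Unwrap only peels the outermost wrapper. A self-contained demonstration of the exact chain named in the comment (*url.Error -> *net.OpError -> *os.SyscallError):

    package main

    import (
        "errors"
        "fmt"
        "net"
        "net/url"
        "os"
        "syscall"
    )

    func main() {
        sysErr := os.NewSyscallError("connect", syscall.ECONNREFUSED)
        opErr := &net.OpError{Op: "dial", Net: "tcp", Err: sysErr}
        urlErr := &url.Error{Op: "post", URL: "http://127.0.0.1:2379", Err: opErr}

        var se *os.SyscallError
        fmt.Println(errors.As(urlErr, &se))         // true: As walks the whole chain
        fmt.Println(se.Err == syscall.ECONNREFUSED) // true

        _, ok := errors.Unwrap(urlErr).(*os.SyscallError)
        fmt.Println(ok) // false: one Unwrap only reaches the *net.OpError
    }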
func (g *TargetInfoGetterImpl) GetReplicationConfig(ctx context.Context) (*pdtypes.ReplicationConfig, error) { result := new(pdtypes.ReplicationConfig) - if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdReplicate, &result); err != nil { + if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdapi.ReplicateConfig, &result); err != nil { return nil, errors.Trace(err) } return result, nil @@ -247,7 +248,7 @@ func (g *TargetInfoGetterImpl) GetReplicationConfig(ctx context.Context) (*pdtyp // It uses the PD interface through TLS to get the information. func (g *TargetInfoGetterImpl) GetStorageInfo(ctx context.Context) (*pdtypes.StoresInfo, error) { result := new(pdtypes.StoresInfo) - if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdStores, result); err != nil { + if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdapi.Stores, result); err != nil { return nil, errors.Trace(err) } return result, nil @@ -258,7 +259,7 @@ func (g *TargetInfoGetterImpl) GetStorageInfo(ctx context.Context) (*pdtypes.Sto // It uses the PD interface through TLS to get the information. func (g *TargetInfoGetterImpl) GetEmptyRegionsInfo(ctx context.Context) (*pdtypes.RegionsInfo, error) { result := new(pdtypes.RegionsInfo) - if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdEmptyRegions, &result); err != nil { + if err := g.tls.WithHost(g.pdCli.GetLeaderAddr()).GetJSON(ctx, pdapi.EmptyRegions, &result); err != nil { return nil, errors.Trace(err) } return result, nil diff --git a/br/pkg/lightning/importer/table_import_test.go b/br/pkg/lightning/importer/table_import_test.go index 4a01a2632c602..4660004a8d0b5 100644 --- a/br/pkg/lightning/importer/table_import_test.go +++ b/br/pkg/lightning/importer/table_import_test.go @@ -64,6 +64,7 @@ import ( "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" tmock "github.com/pingcap/tidb/pkg/util/mock" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/pingcap/tidb/pkg/util/promutil" filter "github.com/pingcap/tidb/pkg/util/table-filter" "github.com/stretchr/testify/require" @@ -1323,9 +1324,9 @@ func (s *tableRestoreSuite) TestCheckClusterRegion() { for i, ca := range testCases { server := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { var err error - if req.URL.Path == pdStores { + if req.URL.Path == pdapi.Stores { _, err = w.Write(mustMarshal(ca.stores)) - } else if req.URL.Path == pdEmptyRegions { + } else if req.URL.Path == pdapi.EmptyRegions { _, err = w.Write(mustMarshal(ca.emptyRegions)) } else { w.WriteHeader(http.StatusNotFound) diff --git a/br/pkg/lightning/mydump/loader.go b/br/pkg/lightning/mydump/loader.go index 091567463cbf7..630a40e015a24 100644 --- a/br/pkg/lightning/mydump/loader.go +++ b/br/pkg/lightning/mydump/loader.go @@ -705,7 +705,8 @@ func calculateFileBytes(ctx context.Context, } defer reader.Close() - compressReader, err := storage.NewLimitedInterceptReader(reader, compressType, storage.DecompressConfig{}, offset) + decompressConfig := storage.DecompressConfig{ZStdDecodeConcurrency: 1} + compressReader, err := storage.NewLimitedInterceptReader(reader, compressType, decompressConfig, offset) if err != nil { return 0, 0, errors.Trace(err) } diff --git a/br/pkg/lightning/tikv/BUILD.bazel b/br/pkg/lightning/tikv/BUILD.bazel index 076999ab0fd51..457e04b66f79d 100644 --- a/br/pkg/lightning/tikv/BUILD.bazel +++ b/br/pkg/lightning/tikv/BUILD.bazel @@ -13,6 +13,7 @@ go_library( "//br/pkg/version", "//pkg/kv", 
"//pkg/parser/model", + "//pkg/util/pdapi", "@com_github_coreos_go_semver//semver", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/debugpb", @@ -36,6 +37,7 @@ go_test( deps = [ ":tikv", "//br/pkg/lightning/common", + "//pkg/util/pdapi", "@com_github_coreos_go_semver//semver", "@com_github_pingcap_kvproto//pkg/import_sstpb", "@com_github_stretchr_testify//require", diff --git a/br/pkg/lightning/tikv/tikv.go b/br/pkg/lightning/tikv/tikv.go index 22aaf5db4e2b9..9a1e674ac992e 100644 --- a/br/pkg/lightning/tikv/tikv.go +++ b/br/pkg/lightning/tikv/tikv.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/br/pkg/version" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/tikv/client-go/v2/util" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -124,7 +125,7 @@ func ForAllStores( Store Store } } - err := tls.GetJSON(ctx, "/pd/api/v1/stores", &stores) + err := tls.GetJSON(ctx, pdapi.Stores, &stores) if err != nil { return err } diff --git a/br/pkg/lightning/tikv/tikv_test.go b/br/pkg/lightning/tikv/tikv_test.go index 6c8d7a976f1d4..a4b192595b4b4 100644 --- a/br/pkg/lightning/tikv/tikv_test.go +++ b/br/pkg/lightning/tikv/tikv_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/kvproto/pkg/import_sstpb" "github.com/pingcap/tidb/br/pkg/lightning/common" kv "github.com/pingcap/tidb/br/pkg/lightning/tikv" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/stretchr/testify/require" ) @@ -175,7 +176,7 @@ func TestCheckPDVersion(t *testing.T) { ctx := context.Background() mockServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - require.Equal(t, "/pd/api/v1/version", req.URL.Path) + require.Equal(t, pdapi.Version, req.URL.Path) w.WriteHeader(http.StatusOK) _, err := w.Write([]byte(version)) require.NoError(t, err) @@ -229,7 +230,7 @@ func TestCheckTiKVVersion(t *testing.T) { ctx := context.Background() mockServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { - require.Equal(t, "/pd/api/v1/stores", req.URL.Path) + require.Equal(t, pdapi.Stores, req.URL.Path) w.WriteHeader(http.StatusOK) stores := make([]map[string]interface{}, 0, len(versions)) diff --git a/br/pkg/pdutil/BUILD.bazel b/br/pkg/pdutil/BUILD.bazel index b941c500fc046..24ce8ea2e809f 100644 --- a/br/pkg/pdutil/BUILD.bazel +++ b/br/pkg/pdutil/BUILD.bazel @@ -15,6 +15,7 @@ go_library( "//pkg/store/pdtypes", "//pkg/tablecodec", "//pkg/util/codec", + "//pkg/util/pdapi", "@com_github_coreos_go_semver//semver", "@com_github_docker_go_units//:go-units", "@com_github_google_uuid//:uuid", @@ -42,6 +43,7 @@ go_test( "//pkg/store/pdtypes", "//pkg/testkit/testsetup", "//pkg/util/codec", + "//pkg/util/pdapi", "@com_github_coreos_go_semver//semver", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/metapb", diff --git a/br/pkg/pdutil/pd.go b/br/pkg/pdutil/pd.go index 9d403017c69d6..ea057ce839a40 100644 --- a/br/pkg/pdutil/pd.go +++ b/br/pkg/pdutil/pd.go @@ -31,29 +31,17 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/pdapi" pd "github.com/tikv/pd/client" "go.uber.org/zap" "google.golang.org/grpc" ) const ( - clusterVersionPrefix = "pd/api/v1/config/cluster-version" - regionCountPrefix = "pd/api/v1/stats/region" - storePrefix = "pd/api/v1/store" - schedulerPrefix = "pd/api/v1/schedulers" - 
resetTSPrefix = "pd/api/v1/admin/reset-ts" - recoveringMarkPrefix = "pd/api/v1/admin/cluster/markers/snapshot-recovering" - baseAllocIDPrefix = "pd/api/v1/admin/base-alloc-id" - minResolvedTSPrefix = "pd/api/v1/min-resolved-ts" - regionLabelPrefix = "pd/api/v1/config/region-label/rule" - maxMsgSize = int(128 * units.MiB) // pd.ScanRegion may return a large response - scheduleConfigPrefix = "pd/api/v1/config/schedule" - configPrefix = "pd/api/v1/config" - pauseTimeout = 5 * time.Minute - + maxMsgSize = int(128 * units.MiB) // pd.ScanRegion may return a large response + pauseTimeout = 5 * time.Minute // pd request retry time when connection fail - pdRequestRetryTime = 10 - + pdRequestRetryTime = 120 // set max-pending-peer-count to a large value to avoid scatter region failed. maxPendingPeerUnlimited uint64 = math.MaxInt32 ) @@ -163,14 +151,18 @@ func pdRequestWithCode( if err != nil { return 0, nil, errors.Trace(err) } - reqURL := fmt.Sprintf("%s/%s", u, prefix) - req, err := http.NewRequestWithContext(ctx, method, reqURL, body) - if err != nil { - return 0, nil, errors.Trace(err) - } - var resp *http.Response + reqURL := fmt.Sprintf("%s%s", u, prefix) + var ( + req *http.Request + resp *http.Response + ) count := 0 + // the total retry duration: 120*1 = 2min for { + req, err = http.NewRequestWithContext(ctx, method, reqURL, body) + if err != nil { + return 0, nil, errors.Trace(err) + } resp, err = cli.Do(req) //nolint:bodyclose count++ failpoint.Inject("InjectClosed", func(v failpoint.Value) { @@ -270,7 +262,7 @@ func NewPdController( } } processedAddrs = append(processedAddrs, addr) - versionBytes, failure = pdRequest(ctx, addr, clusterVersionPrefix, cli, http.MethodGet, nil) + versionBytes, failure = pdRequest(ctx, addr, pdapi.ClusterVersion, cli, http.MethodGet, nil) if failure == nil { break } @@ -367,7 +359,7 @@ func (p *PdController) GetClusterVersion(ctx context.Context) (string, error) { func (p *PdController) getClusterVersionWith(ctx context.Context, get pdHTTPRequest) (string, error) { var err error for _, addr := range p.getAllPDAddrs() { - v, e := get(ctx, addr, clusterVersionPrefix, p.cli, http.MethodGet, nil) + v, e := get(ctx, addr, pdapi.ClusterVersion, p.cli, http.MethodGet, nil) if e != nil { err = e continue @@ -394,10 +386,7 @@ func (p *PdController) getRegionCountWith( } var err error for _, addr := range p.getAllPDAddrs() { - query := fmt.Sprintf( - "%s?start_key=%s&end_key=%s", - regionCountPrefix, start, end) - v, e := get(ctx, addr, query, p.cli, http.MethodGet, nil) + v, e := get(ctx, addr, pdapi.RegionStatsByStartEndKey(start, end), p.cli, http.MethodGet, nil) if e != nil { err = e continue @@ -421,10 +410,7 @@ func (p *PdController) getStoreInfoWith( ctx context.Context, get pdHTTPRequest, storeID uint64) (*pdtypes.StoreInfo, error) { var err error for _, addr := range p.getAllPDAddrs() { - query := fmt.Sprintf( - "%s/%d", - storePrefix, storeID) - v, e := get(ctx, addr, query, p.cli, http.MethodGet, nil) + v, e := get(ctx, addr, pdapi.StoreByID(storeID), p.cli, http.MethodGet, nil) if e != nil { err = e continue @@ -449,9 +435,8 @@ func (p *PdController) doPauseSchedulers(ctx context.Context, // PauseSchedulers remove pd scheduler temporarily. 
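Two things change in the pd.go retry hunk above: pdRequestRetryTime grows from 10 to 120 (at roughly one round per second, the ~2 minute budget the new comment states), and the *http.Request is now rebuilt inside the loop, because a request whose body reader was consumed by a failed attempt cannot simply be resent. The idiom, sketched with a replayable []byte payload (helper name and backoff are illustrative, not the PR's):

    package sketch

    import (
        "bytes"
        "context"
        "net/http"
        "time"
    )

    func doWithRetry(ctx context.Context, cli *http.Client, method, url string,
        payload []byte, attempts int) (*http.Response, error) {
        var lastErr error
        for i := 0; i < attempts; i++ {
            // a fresh request per attempt: Request.Body is a one-shot reader
            req, err := http.NewRequestWithContext(ctx, method, url, bytes.NewReader(payload))
            if err != nil {
                return nil, err
            }
            resp, err := cli.Do(req)
            if err == nil {
                return resp, nil
            }
            lastErr = err
            time.Sleep(time.Second)
        }
        return nil, lastErr
    }

The TestPDRequestRetry change in pd_serial_test.go further down (DisableKeepAlives plus a POST body) reproduces the dropped-idle-connection failure from #47930 that motivated this.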
removedSchedulers := make([]string, 0, len(schedulers)) for _, scheduler := range schedulers { - prefix := fmt.Sprintf("%s/%s", schedulerPrefix, scheduler) for _, addr := range p.getAllPDAddrs() { - _, err = post(ctx, addr, prefix, p.cli, http.MethodPost, bytes.NewBuffer(body)) + _, err = post(ctx, addr, pdapi.SchedulerByName(scheduler), p.cli, http.MethodPost, bytes.NewBuffer(body)) if err == nil { removedSchedulers = append(removedSchedulers, scheduler) break @@ -532,9 +517,8 @@ func (p *PdController) resumeSchedulerWith(ctx context.Context, schedulers []str return errors.Trace(err) } for _, scheduler := range schedulers { - prefix := fmt.Sprintf("%s/%s", schedulerPrefix, scheduler) for _, addr := range p.getAllPDAddrs() { - _, err = post(ctx, addr, prefix, p.cli, http.MethodPost, bytes.NewBuffer(body)) + _, err = post(ctx, addr, pdapi.SchedulerByName(scheduler), p.cli, http.MethodPost, bytes.NewBuffer(body)) if err == nil { break } @@ -558,7 +542,7 @@ func (p *PdController) ListSchedulers(ctx context.Context) ([]string, error) { func (p *PdController) listSchedulersWith(ctx context.Context, get pdHTTPRequest) ([]string, error) { var err error for _, addr := range p.getAllPDAddrs() { - v, e := get(ctx, addr, schedulerPrefix, p.cli, http.MethodGet, nil) + v, e := get(ctx, addr, pdapi.Schedulers, p.cli, http.MethodGet, nil) if e != nil { err = e continue @@ -581,7 +565,7 @@ func (p *PdController) GetPDScheduleConfig( var err error for _, addr := range p.getAllPDAddrs() { v, e := pdRequest( - ctx, addr, scheduleConfigPrefix, p.cli, http.MethodGet, nil) + ctx, addr, pdapi.ScheduleConfig, p.cli, http.MethodGet, nil) if e != nil { err = e continue @@ -605,7 +589,7 @@ func (p *PdController) UpdatePDScheduleConfig(ctx context.Context) error { func (p *PdController) doUpdatePDScheduleConfig( ctx context.Context, cfg map[string]interface{}, post pdHTTPRequest, prefixs ...string, ) error { - prefix := configPrefix + prefix := pdapi.Config if len(prefixs) != 0 { prefix = prefixs[0] } @@ -634,8 +618,7 @@ func (p *PdController) doUpdatePDScheduleConfig( func (p *PdController) doPauseConfigs(ctx context.Context, cfg map[string]interface{}, post pdHTTPRequest) error { // pause this scheduler with 300 seconds - prefix := fmt.Sprintf("%s?ttlSecond=%.0f", configPrefix, pauseTimeout.Seconds()) - return p.doUpdatePDScheduleConfig(ctx, cfg, post, prefix) + return p.doUpdatePDScheduleConfig(ctx, cfg, post, pdapi.ConfigWithTTLSeconds(pauseTimeout.Seconds())) } func restoreSchedulers(ctx context.Context, pd *PdController, clusterCfg ClusterConfig, @@ -657,7 +640,7 @@ func restoreSchedulers(ctx context.Context, pd *PdController, clusterCfg Cluster prefix := make([]string, 0, 1) if pd.isPauseConfigEnabled() { // set config's ttl to zero, make temporary config invalid immediately. - prefix = append(prefix, fmt.Sprintf("%s?ttlSecond=%d", configPrefix, 0)) + prefix = append(prefix, pdapi.ConfigWithTTLSeconds(0)) } // reset config with previous value. 
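From here on the PR consistently swaps hand-assembled PD URLs for helpers in the new pkg/util/pdapi package, whose source is not part of this diff. Judging purely from the deleted constants and the call sites and test expectations in this patch, its shape is roughly the following (a hypothetical reconstruction, not verified against the real package):

    package pdapi

    import "fmt"

    const (
        Stores         = "/pd/api/v1/stores"
        Version        = "/pd/api/v1/version"
        ClusterVersion = "/pd/api/v1/config/cluster-version"
        Schedulers     = "/pd/api/v1/schedulers"
        Config         = "/pd/api/v1/config"
    )

    // StoreByID must satisfy pd_serial_test.go below:
    // StoreByID(1) -> "/pd/api/v1/store/1".
    func StoreByID(id uint64) string { return fmt.Sprintf("/pd/api/v1/store/%d", id) }

    func SchedulerByName(name string) string { return Schedulers + "/" + name }

    // ConfigWithTTLSeconds fits both call sites above: pauseTimeout.Seconds()
    // (a float64) and the literal 0.
    func ConfigWithTTLSeconds(ttl float64) string {
        return fmt.Sprintf("%s?ttlSecond=%.0f", Config, ttl)
    }

Note the leading slashes: the deleted pd.go constants lacked them and were joined with "%s/%s", while the new helpers are concatenated directly ("%s%s"), so the slash has to live in the constant itself.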
if err := pd.doUpdatePDScheduleConfig(ctx, mergeCfg, pdRequest, prefix...); err != nil { @@ -846,7 +829,7 @@ func (p *PdController) doRemoveSchedulersWith( func (p *PdController) GetMinResolvedTS(ctx context.Context) (uint64, error) { var err error for _, addr := range p.getAllPDAddrs() { - v, e := pdRequest(ctx, addr, minResolvedTSPrefix, p.cli, http.MethodGet, nil) + v, e := pdRequest(ctx, addr, pdapi.MinResolvedTS, p.cli, http.MethodGet, nil) if e != nil { log.Warn("failed to get min resolved ts", zap.String("addr", addr), zap.Error(e)) err = e @@ -880,7 +863,7 @@ func (p *PdController) RecoverBaseAllocID(ctx context.Context, id uint64) error }) var err error for _, addr := range p.getAllPDAddrs() { - _, e := pdRequest(ctx, addr, baseAllocIDPrefix, p.cli, http.MethodPost, bytes.NewBuffer(reqData)) + _, e := pdRequest(ctx, addr, pdapi.BaseAllocID, p.cli, http.MethodPost, bytes.NewBuffer(reqData)) if e != nil { log.Warn("failed to recover base alloc id", zap.String("addr", addr), zap.Error(e)) err = e @@ -904,7 +887,7 @@ func (p *PdController) ResetTS(ctx context.Context, ts uint64) error { }) var err error for _, addr := range p.getAllPDAddrs() { - code, _, e := pdRequestWithCode(ctx, addr, resetTSPrefix, p.cli, http.MethodPost, bytes.NewBuffer(reqData)) + code, _, e := pdRequestWithCode(ctx, addr, pdapi.ResetTS, p.cli, http.MethodPost, bytes.NewBuffer(reqData)) if e != nil { // for pd version <= 6.2, if the given ts < current ts of pd, pd returns StatusForbidden. // it's not an error for br @@ -934,7 +917,7 @@ func (p *PdController) UnmarkRecovering(ctx context.Context) error { func (p *PdController) operateRecoveringMark(ctx context.Context, method string) error { var err error for _, addr := range p.getAllPDAddrs() { - _, e := pdRequest(ctx, addr, recoveringMarkPrefix, p.cli, method, nil) + _, e := pdRequest(ctx, addr, pdapi.SnapshotRecoveringMark, p.cli, method, nil) if e != nil { log.Warn("failed to operate recovering mark", zap.String("method", method), zap.String("addr", addr), zap.Error(e)) @@ -980,7 +963,7 @@ func (p *PdController) CreateOrUpdateRegionLabelRule(ctx context.Context, rule L var lastErr error addrs := p.getAllPDAddrs() for i, addr := range addrs { - _, lastErr = pdRequest(ctx, addr, regionLabelPrefix, + _, lastErr = pdRequest(ctx, addr, pdapi.RegionLabelRule, p.cli, http.MethodPost, bytes.NewBuffer(reqData)) if lastErr == nil { return nil @@ -1002,7 +985,7 @@ func (p *PdController) DeleteRegionLabelRule(ctx context.Context, ruleID string) var lastErr error addrs := p.getAllPDAddrs() for i, addr := range addrs { - _, lastErr = pdRequest(ctx, addr, fmt.Sprintf("%s/%s", regionLabelPrefix, ruleID), + _, lastErr = pdRequest(ctx, addr, fmt.Sprintf("%s/%s", pdapi.RegionLabelRule, ruleID), p.cli, http.MethodDelete, nil) if lastErr == nil { return nil @@ -1106,7 +1089,7 @@ func FetchPDVersion(ctx context.Context, tls *common.TLS, pdAddr string) (*semve var rawVersion struct { Version string `json:"version"` } - err := tls.WithHost(pdAddr).GetJSON(ctx, "/pd/api/v1/version", &rawVersion) + err := tls.WithHost(pdAddr).GetJSON(ctx, pdapi.Version, &rawVersion) if err != nil { return nil, errors.Trace(err) } diff --git a/br/pkg/pdutil/pd_serial_test.go b/br/pkg/pdutil/pd_serial_test.go index 32f69106b2139..67a37c072b834 100644 --- a/br/pkg/pdutil/pd_serial_test.go +++ b/br/pkg/pdutil/pd_serial_test.go @@ -3,6 +3,7 @@ package pdutil import ( + "bytes" "context" "encoding/hex" "encoding/json" @@ -22,6 +23,7 @@ import ( "github.com/pingcap/kvproto/pkg/metapb" 
"github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/stretchr/testify/require" ) @@ -186,8 +188,16 @@ func TestPDRequestRetry(t *testing.T) { w.WriteHeader(http.StatusOK) })) cli := http.DefaultClient + cli.Transport = http.DefaultTransport.(*http.Transport).Clone() + // although the real code doesn't disable keep alive, we need to disable it + // in test to avoid the connection being reused and #47930 can't appear. The + // real code will only meet #47930 when go's internal http client just dropped + // all idle connections. + cli.Transport.(*http.Transport).DisableKeepAlives = true + taddr := ts.URL - _, reqErr := pdRequest(ctx, taddr, "", cli, http.MethodGet, nil) + body := bytes.NewBuffer([]byte("test")) + _, reqErr := pdRequest(ctx, taddr, "", cli, http.MethodPost, body) require.NoError(t, reqErr) ts.Close() count = 0 @@ -261,8 +271,9 @@ func TestStoreInfo(t *testing.T) { mock := func( _ context.Context, addr string, prefix string, _ *http.Client, _ string, _ io.Reader, ) ([]byte, error) { - query := fmt.Sprintf("%s/%s", addr, prefix) - require.Equal(t, "http://mock/pd/api/v1/store/1", query) + require.Equal(t, + fmt.Sprintf("http://mock%s", pdapi.StoreByID(1)), + fmt.Sprintf("%s%s", addr, prefix)) ret, err := json.Marshal(storeInfo) require.NoError(t, err) return ret, nil @@ -295,7 +306,7 @@ func TestPauseSchedulersByKeyRange(t *testing.T) { return } if r.Method == http.MethodDelete { - ruleID := strings.TrimPrefix(r.URL.Path, "/"+regionLabelPrefix+"/") + ruleID := strings.TrimPrefix(r.URL.Path, pdapi.RegionLabelRule+"/") delete(labelExpires, ruleID) deleted = true return diff --git a/br/pkg/pdutil/utils.go b/br/pkg/pdutil/utils.go index 41f13b9edd437..548a155b3362a 100644 --- a/br/pkg/pdutil/utils.go +++ b/br/pkg/pdutil/utils.go @@ -8,6 +8,7 @@ import ( "crypto/tls" "encoding/hex" "encoding/json" + "fmt" "net/http" "github.com/pingcap/errors" @@ -16,6 +17,7 @@ import ( "github.com/pingcap/tidb/pkg/store/pdtypes" "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/pdapi" ) // UndoFunc is a 'undo' operation of some undoable command. @@ -25,10 +27,6 @@ type UndoFunc func(context.Context) error // Nop is the 'zero value' of undo func. var Nop UndoFunc = func(context.Context) error { return nil } -const ( - placementRuleURL = "/pd/api/v1/config/rules" -) - // GetPlacementRules return the current placement rules. 
func GetPlacementRules(ctx context.Context, pdAddr string, tlsConf *tls.Config) ([]pdtypes.Rule, error) { cli := httputil.NewClient(tlsConf) @@ -36,7 +34,7 @@ func GetPlacementRules(ctx context.Context, pdAddr string, tlsConf *tls.Config) if tlsConf != nil { prefix = "https://" } - reqURL := prefix + pdAddr + placementRuleURL + reqURL := fmt.Sprintf("%s%s%s", prefix, pdAddr, pdapi.PlacementRules) req, err := http.NewRequestWithContext(ctx, "GET", reqURL, nil) if err != nil { return nil, errors.Trace(err) diff --git a/br/pkg/restore/ingestrec/BUILD.bazel b/br/pkg/restore/ingestrec/BUILD.bazel index 53b905b4f0ac9..a8fd65359d18b 100644 --- a/br/pkg/restore/ingestrec/BUILD.bazel +++ b/br/pkg/restore/ingestrec/BUILD.bazel @@ -21,7 +21,7 @@ go_test( deps = [ ":ingestrec", "//pkg/parser/model", - "@com_github_pkg_errors//:errors", + "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", ], ) diff --git a/br/pkg/restore/ingestrec/ingest_recorder_test.go b/br/pkg/restore/ingestrec/ingest_recorder_test.go index 1c31e9f8ee1c1..eaacde6e73c1c 100644 --- a/br/pkg/restore/ingestrec/ingest_recorder_test.go +++ b/br/pkg/restore/ingestrec/ingest_recorder_test.go @@ -18,9 +18,9 @@ import ( "encoding/json" "testing" + "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/restore/ingestrec" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pkg/errors" "github.com/stretchr/testify/require" ) diff --git a/br/pkg/restore/split/BUILD.bazel b/br/pkg/restore/split/BUILD.bazel index 41969eb7324cc..3a24fca4275af 100644 --- a/br/pkg/restore/split/BUILD.bazel +++ b/br/pkg/restore/split/BUILD.bazel @@ -20,6 +20,7 @@ go_library( "//br/pkg/utils", "//pkg/kv", "//pkg/store/pdtypes", + "//pkg/util/pdapi", "@com_github_google_btree//:btree", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/br/pkg/restore/split/client.go b/br/pkg/restore/split/client.go index 1b471e42e201a..99dfe88bd39fd 100644 --- a/br/pkg/restore/split/client.go +++ b/br/pkg/restore/split/client.go @@ -11,7 +11,6 @@ import ( "io" "net/http" "path" - "strconv" "strings" "sync" "time" @@ -30,6 +29,7 @@ import ( "github.com/pingcap/tidb/br/pkg/lightning/config" "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/pkg/store/pdtypes" + "github.com/pingcap/tidb/pkg/util/pdapi" pd "github.com/tikv/pd/client" "go.uber.org/multierr" "go.uber.org/zap" @@ -463,8 +463,7 @@ func (c *pdClient) getStoreCount(ctx context.Context) (int, error) { func (c *pdClient) getMaxReplica(ctx context.Context) (int, error) { api := c.getPDAPIAddr() - configAPI := api + "/pd/api/v1/config/replicate" - req, err := http.NewRequestWithContext(ctx, "GET", configAPI, nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf("%s%s", api, pdapi.ReplicateConfig), nil) if err != nil { return 0, errors.Trace(err) } @@ -541,7 +540,7 @@ func (c *pdClient) GetPlacementRule(ctx context.Context, groupID, ruleID string) return rule, errors.Annotate(berrors.ErrRestoreSplitFailed, "failed to add stores labels: no leader") } req, err := http.NewRequestWithContext(ctx, "GET", - addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + addr+path.Join(pdapi.PlacementRule, groupID, ruleID), nil) if err != nil { return rule, errors.Trace(err) } @@ -572,7 +571,7 @@ func (c *pdClient) SetPlacementRule(ctx context.Context, rule pdtypes.Rule) erro } m, _ := json.Marshal(rule) req, err := http.NewRequestWithContext(ctx, "POST", - addr+path.Join("/pd/api/v1/config/rule"), bytes.NewReader(m)) + 
addr+path.Join(pdapi.PlacementRule), bytes.NewReader(m)) if err != nil { return errors.Trace(err) } @@ -588,7 +587,7 @@ func (c *pdClient) DeletePlacementRule(ctx context.Context, groupID, ruleID stri if addr == "" { return errors.Annotate(berrors.ErrPDLeaderNotFound, "failed to add stores labels") } - req, err := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join("/pd/api/v1/config/rule", groupID, ruleID), nil) + req, err := http.NewRequestWithContext(ctx, "DELETE", addr+path.Join(pdapi.PlacementRule, groupID, ruleID), nil) if err != nil { return errors.Trace(err) } @@ -611,7 +610,7 @@ func (c *pdClient) SetStoresLabel( for _, id := range stores { req, err := http.NewRequestWithContext( ctx, "POST", - addr+path.Join("/pd/api/v1/store", strconv.FormatUint(id, 10), "label"), + addr+pdapi.StoreLabelByID(id), bytes.NewReader(b), ) if err != nil { diff --git a/br/pkg/storage/ks3.go b/br/pkg/storage/ks3.go index 42c4786c306df..b917146b6f06a 100644 --- a/br/pkg/storage/ks3.go +++ b/br/pkg/storage/ks3.go @@ -673,9 +673,10 @@ func (rs *KS3Storage) Create(ctx context.Context, name string, option *WriterOpt s3Writer.wg.Add(1) go func() { _, err := up.Upload(upParams) - err1 := rd.Close() + // like a channel, we only let the sender close the pipe on the happy path if err != nil { - log.Warn("upload to s3 failed", zap.String("filename", name), zap.Error(err), zap.Error(err1)) + log.Warn("upload to ks3 failed", zap.String("filename", name), zap.Error(err)) + _ = rd.CloseWithError(err) } s3Writer.err = err s3Writer.wg.Done() diff --git a/br/pkg/storage/s3.go b/br/pkg/storage/s3.go index 14ff8fc717fb5..97f58ec5b89fd 100644 --- a/br/pkg/storage/s3.go +++ b/br/pkg/storage/s3.go @@ -1044,9 +1044,10 @@ func (rs *S3Storage) Create(ctx context.Context, name string, option *WriterOpti s3Writer.wg.Add(1) go func() { _, err := up.UploadWithContext(ctx, upParams) - err1 := rd.Close() + // like a channel, we only let the sender close the pipe on the happy path if err != nil { - log.Warn("upload to s3 failed", zap.String("filename", name), zap.Error(err), zap.Error(err1)) + log.Warn("upload to s3 failed", zap.String("filename", name), zap.Error(err)) + _ = rd.CloseWithError(err) } s3Writer.err = err s3Writer.wg.Done() diff --git a/br/pkg/storage/s3_test.go b/br/pkg/storage/s3_test.go index d1f5d21bc617d..829e2049bdbcd 100644 --- a/br/pkg/storage/s3_test.go +++ b/br/pkg/storage/s3_test.go @@ -484,6 +484,24 @@ func TestWriteNoError(t *testing.T) { require.NoError(t, err) } +func TestMultiUploadErrorNotOverwritten(t *testing.T) { + s := createS3Suite(t) + ctx := aws.BackgroundContext() + + s.s3.EXPECT(). + CreateMultipartUploadWithContext(ctx, gomock.Any(), gomock.Any()). + Return(nil, errors.New("mock error")) + + w, err := s.storage.Create(ctx, "file", &WriterOption{Concurrency: 2}) + require.NoError(t, err) + // data should be larger than 5MB to trigger the CreateMultipartUploadWithContext path + data := make([]byte, 5*1024*1024+6716) + n, err := w.Write(ctx, data) + require.NoError(t, err) + require.Equal(t, 5*1024*1024+6716, n) + require.ErrorContains(t, w.Close(ctx), "mock error") +} + // TestReadNoError ensures the ReadFile API issues a GetObject request and correctly // reads the entire body.
func TestReadNoError(t *testing.T) { diff --git a/br/pkg/stream/rewrite_meta_rawkv.go b/br/pkg/stream/rewrite_meta_rawkv.go index 55086f17d99d4..5366b60150d40 100644 --- a/br/pkg/stream/rewrite_meta_rawkv.go +++ b/br/pkg/stream/rewrite_meta_rawkv.go @@ -23,6 +23,7 @@ import ( backuppb "github.com/pingcap/kvproto/pkg/brpb" "github.com/pingcap/log" berrors "github.com/pingcap/tidb/br/pkg/errors" + "github.com/pingcap/tidb/br/pkg/logutil" "github.com/pingcap/tidb/br/pkg/restore/ingestrec" "github.com/pingcap/tidb/br/pkg/restore/tiflashrec" "github.com/pingcap/tidb/pkg/kv" @@ -705,10 +706,11 @@ func (sr *SchemasReplace) restoreFromHistory(job *model.Job, isSubJob bool) erro } func (sr *SchemasReplace) deleteRange(job *model.Job) error { + lctx := logutil.ContextWithField(context.Background(), logutil.RedactAny("category", "ddl: rewrite delete range")) dbReplace, exist := sr.DbMap[job.SchemaID] if !exist { // skip this ddl job, the same below - log.Debug("try to drop a non-existent range, missing oldDBID", zap.Int64("oldDBID", job.SchemaID)) + logutil.CL(lctx).Warn("try to drop a non-existent range, missing oldDBID", zap.Int64("oldDBID", job.SchemaID)) return nil } @@ -744,14 +746,14 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { newTableIDs := make([]int64, 0, len(tableIDs)) for tableID, tableReplace := range dbReplace.TableMap { if _, exist := argsSet[tableID]; !exist { - log.Debug("DropSchema: record a table, but it doesn't exist in job args", + logutil.CL(lctx).Warn("DropSchema: record a table, but it doesn't exist in job args", zap.Int64("oldTableID", tableID)) continue } newTableIDs = append(newTableIDs, tableReplace.TableID) for partitionID, newPartitionID := range tableReplace.PartitionMap { if _, exist := argsSet[partitionID]; !exist { - log.Debug("DropSchema: record a partition, but it doesn't exist in job args", + logutil.CL(lctx).Warn("DropSchema: record a partition, but it doesn't exist in job args", zap.Int64("oldPartitionID", partitionID)) continue } @@ -760,7 +762,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { } if len(newTableIDs) != len(tableIDs) { - log.Debug( + logutil.CL(lctx).Warn( "DropSchema: try to drop a non-existent table/partition, whose oldID doesn't exist in tableReplace") // only drop newTableIDs' ranges } @@ -774,7 +776,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { case model.ActionDropTable, model.ActionTruncateTable: tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropTable/TruncateTable: try to drop a non-existent table, missing oldTableID", + logutil.CL(lctx).Warn("DropTable/TruncateTable: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -787,18 +789,19 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { return errors.Trace(err) } if len(physicalTableIDs) > 0 { - // delete partition id instead of table id - for i := 0; i < len(physicalTableIDs); i++ { - newPid, exist := tableReplace.PartitionMap[physicalTableIDs[i]] + newPhysicalTableIDs := make([]int64, 0, len(physicalTableIDs)) + // delete partition id + for _, oldPid := range physicalTableIDs { + newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropTable/TruncateTable: try to drop a non-existent table, missing oldPartitionID", - zap.Int64("oldPartitionID", physicalTableIDs[i])) + logutil.CL(lctx).Warn("DropTable/TruncateTable: try to drop a non-existent table, missing oldPartitionID", + zap.Int64("oldPartitionID",
oldPid)) continue } - physicalTableIDs[i] = newPid + newPhysicalTableIDs = append(newPhysicalTableIDs, newPid) } - if len(physicalTableIDs) > 0 { - sr.insertDeleteRangeForTable(newJobID, physicalTableIDs) + if len(newPhysicalTableIDs) > 0 { + sr.insertDeleteRangeForTable(newJobID, newPhysicalTableIDs) } return nil } @@ -808,7 +811,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { case model.ActionDropTablePartition, model.ActionTruncateTablePartition: tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug( + logutil.CL(lctx).Warn( "DropTablePartition/TruncateTablePartition: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil @@ -818,18 +821,19 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { return errors.Trace(err) } - for i := 0; i < len(physicalTableIDs); i++ { - newPid, exist := tableReplace.PartitionMap[physicalTableIDs[i]] + newPhysicalTableIDs := make([]int64, 0, len(physicalTableIDs)) + for _, oldPid := range physicalTableIDs { + newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug( + logutil.CL(lctx).Warn( "DropTablePartition/TruncateTablePartition: try to drop a non-existent table, missing oldPartitionID", - zap.Int64("oldPartitionID", physicalTableIDs[i])) + zap.Int64("oldPartitionID", oldPid)) continue } - physicalTableIDs[i] = newPid + newPhysicalTableIDs = append(newPhysicalTableIDs, newPid) } - if len(physicalTableIDs) > 0 { - sr.insertDeleteRangeForTable(newJobID, physicalTableIDs) + if len(newPhysicalTableIDs) > 0 { + sr.insertDeleteRangeForTable(newJobID, newPhysicalTableIDs) } return nil // ActionAddIndex, ActionAddPrimaryKey needs do it, because it needs to be rolled back when it's canceled. @@ -837,7 +841,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { // iff job.State = model.JobStateRollbackDone tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("AddIndex/AddPrimaryKey roll-back: try to drop a non-existent table, missing oldTableID", + logutil.CL(lctx).Warn("AddIndex/AddPrimaryKey roll-back: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -856,7 +860,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug( + logutil.CL(lctx).Warn( "AddIndex/AddPrimaryKey roll-back: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) continue @@ -871,7 +875,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { case model.ActionDropIndex, model.ActionDropPrimaryKey: tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropIndex/DropPrimaryKey: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) + logutil.CL(lctx).Warn("DropIndex/DropPrimaryKey: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -890,7 +894,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropIndex/DropPrimaryKey: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) + logutil.CL(lctx).Warn("DropIndex/DropPrimaryKey: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) 
continue } // len(indexIDs) = 1 @@ -913,7 +917,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropIndexes: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) + logutil.CL(lctx).Warn("DropIndexes: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -922,7 +926,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropIndexes: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) + logutil.CL(lctx).Warn("DropIndexes: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) continue } sr.insertDeleteRangeForIndex(newJobID, &elementID, newPid, indexIDs) @@ -942,7 +946,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { if len(indexIDs) > 0 { tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropColumn: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) + logutil.CL(lctx).Warn("DropColumn: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -951,7 +955,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropColumn: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) + logutil.CL(lctx).Warn("DropColumn: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) continue } sr.insertDeleteRangeForIndex(newJobID, &elementID, newPid, indexIDs) @@ -972,7 +976,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { if len(indexIDs) > 0 { tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropColumns: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) + logutil.CL(lctx).Warn("DropColumns: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -981,7 +985,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropColumns: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) + logutil.CL(lctx).Warn("DropColumns: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) continue } sr.insertDeleteRangeForIndex(newJobID, &elementID, newPid, indexIDs) @@ -1001,7 +1005,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { } tableReplace, exist := dbReplace.TableMap[job.TableID] if !exist { - log.Debug("DropColumn: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) + logutil.CL(lctx).Warn("DropColumn: try to drop a non-existent table, missing oldTableID", zap.Int64("oldTableID", job.TableID)) return nil } @@ -1010,7 +1014,7 @@ func (sr *SchemasReplace) deleteRange(job *model.Job) error { for _, oldPid := range partitionIDs { newPid, exist := tableReplace.PartitionMap[oldPid] if !exist { - log.Debug("DropColumn: try to drop a non-existent table, missing oldPartitionID", 
zap.Int64("oldPartitionID", oldPid)) + logutil.CL(lctx).Warn("DropColumn: try to drop a non-existent table, missing oldPartitionID", zap.Int64("oldPartitionID", oldPid)) continue } sr.insertDeleteRangeForIndex(newJobID, &elementID, newPid, indexIDs) diff --git a/br/pkg/stream/util_test.go b/br/pkg/stream/util_test.go index 2562c9ce15840..6dda62a04ad60 100644 --- a/br/pkg/stream/util_test.go +++ b/br/pkg/stream/util_test.go @@ -23,6 +23,10 @@ func TestDateFormat(t *testing.T) { 434605479096221697, "2022-07-15 20:32:12.734 +0800", }, + { + 434605478903808000, + "2022-07-15 20:32:12 +0800", + }, } timeZone, _ := time.LoadLocation("Asia/Shanghai") diff --git a/br/pkg/streamhelper/integration_test.go b/br/pkg/streamhelper/integration_test.go index f856ca74d14a4..ff7c23b138fbe 100644 --- a/br/pkg/streamhelper/integration_test.go +++ b/br/pkg/streamhelper/integration_test.go @@ -50,8 +50,8 @@ func runEtcd(t *testing.T) (*embed.Etcd, *clientv3.Client) { cfg := embed.NewConfig() cfg.Dir = t.TempDir() clientURL := getRandomLocalAddr() - cfg.LCUrls = []url.URL{clientURL} - cfg.LPUrls = []url.URL{getRandomLocalAddr()} + cfg.ListenClientUrls = []url.URL{clientURL} + cfg.ListenPeerUrls = []url.URL{getRandomLocalAddr()} cfg.LogLevel = "fatal" etcd, err := embed.StartEtcd(cfg) if err != nil { diff --git a/br/pkg/task/backup.go b/br/pkg/task/backup.go index 497791b315d42..4b49ed0e86700 100644 --- a/br/pkg/task/backup.go +++ b/br/pkg/task/backup.go @@ -773,7 +773,7 @@ func ParseTSString(ts string, tzCheck bool) (uint64, error) { return 0, errors.Errorf("must set timezone when using datetime format ts, e.g. '2018-05-11 01:42:23+0800'") } } - t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(sc.TypeCtx(), ts, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, errors.Trace(err) } diff --git a/br/pkg/task/common.go b/br/pkg/task/common.go index e17d81aedb32b..1aec04510bbc6 100644 --- a/br/pkg/task/common.go +++ b/br/pkg/task/common.go @@ -80,6 +80,7 @@ const ( flagDryRun = "dry-run" // TODO used for local test, should be removed later flagSkipAWS = "skip-aws" + flagUseFSR = "use-fsr" flagCloudAPIConcurrency = "cloud-api-concurrency" flagWithSysTable = "with-sys-table" flagOperatorPausedGCAndSchedulers = "operator-paused-gc-and-scheduler" diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index d0f79845cb369..b70788c821ef8 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -139,6 +139,7 @@ func DefineRestoreCommonFlags(flags *pflag.FlagSet) { "batch size for ddl to create a batch of tables once.") flags.Bool(flagWithSysTable, false, "whether restore system privilege tables on default setting") flags.StringArrayP(FlagResetSysUsers, "", []string{"cloud_admin", "root"}, "whether reset these users after restoration") + flags.Bool(flagUseFSR, false, "whether enable FSR for AWS snapshots") _ = flags.MarkHidden(FlagResetSysUsers) _ = flags.MarkHidden(FlagMergeRegionSizeBytes) _ = flags.MarkHidden(FlagMergeRegionKeyCount) @@ -218,6 +219,7 @@ type RestoreConfig struct { VolumeThroughput int64 `json:"volume-throughput" toml:"volume-throughput"` ProgressFile string `json:"progress-file" toml:"progress-file"` TargetAZ string `json:"target-az" toml:"target-az"` + UseFSR bool `json:"use-fsr" toml:"use-fsr"` } // DefineRestoreFlags defines common flags for the restore tidb command. 
@@ -391,6 +393,11 @@ func (cfg *RestoreConfig) ParseFromFlags(flags *pflag.FlagSet) error { return errors.Trace(err) } + cfg.UseFSR, err = flags.GetBool(flagUseFSR) + if err != nil { + return errors.Trace(err) + } + // iops: gp3 [3,000-16,000]; io1/io2 [100-32,000] // throughput: gp3 [125, 1000]; io1/io2 cannot set throughput // io1 and io2 volumes support up to 64,000 IOPS only on Instances built on the Nitro System. diff --git a/br/pkg/task/restore_data.go b/br/pkg/task/restore_data.go index 357e56672e894..3276a0f2af101 100644 --- a/br/pkg/task/restore_data.go +++ b/br/pkg/task/restore_data.go @@ -159,23 +159,17 @@ func RunResolveKvData(c context.Context, g glue.Glue, cmdName string, cfg *Resto //TODO: restore volume type into origin type //ModifyVolume(*ec2.ModifyVolumeInput) (*ec2.ModifyVolumeOutput, error) by backupmeta - // this is used for cloud restoration + err = client.Init(g, mgr.GetStorage()) if err != nil { return errors.Trace(err) } defer client.Close() - log.Info("start to clear system user for cloud") - err = client.ClearSystemUsers(ctx, cfg.ResetSysUsers) - - if err != nil { - return errors.Trace(err) - } - // since we cannot reset tiflash automatically, we should start it manually if err = client.ResetTiFlashReplicas(ctx, g, mgr.GetStorage()); err != nil { return errors.Trace(err) } + progress.Close() summary.CollectDuration("restore duration", time.Since(startAll)) summary.SetSuccessStatus(true) diff --git a/br/pkg/task/restore_ebs_meta.go b/br/pkg/task/restore_ebs_meta.go index 53286505b5b9c..7dbad5960cb17 100644 --- a/br/pkg/task/restore_ebs_meta.go +++ b/br/pkg/task/restore_ebs_meta.go @@ -175,10 +175,10 @@ func (h *restoreEBSMetaHelper) restore() error { return errors.Trace(err) } - storeCount := h.metaInfo.GetStoreCount() - progress := h.g.StartProgress(ctx, h.cmdName, int64(storeCount), !h.cfg.LogProgress) + volumeCount := h.metaInfo.GetStoreCount() * h.metaInfo.GetTiKVVolumeCount() + progress := h.g.StartProgress(ctx, h.cmdName, int64(volumeCount), !h.cfg.LogProgress) defer progress.Close() - go progressFileWriterRoutine(ctx, progress, int64(storeCount), h.cfg.ProgressFile) + go progressFileWriterRoutine(ctx, progress, int64(volumeCount), h.cfg.ProgressFile) resolvedTs = h.metaInfo.ClusterInfo.ResolvedTS if totalSize, err = h.doRestore(ctx, progress); err != nil { @@ -226,6 +226,8 @@ func (h *restoreEBSMetaHelper) restoreVolumes(progress glue.Progress) (map[strin volumeIDMap = make(map[string]string) err error totalSize int64 + // a map whose key is the availability zone, and whose value is the snapshot id array + snapshotsIDsMap = make(map[string][]*string) ) ec2Session, err = aws.NewEC2Session(h.cfg.CloudAPIConcurrency, h.cfg.S3.Region) if err != nil { @@ -236,7 +238,22 @@ log.Error("failed to create all volumes, cleaning up created volume") ec2Session.DeleteVolumes(volumeIDMap) } + + if h.cfg.UseFSR { + if err = ec2Session.DisableDataFSR(snapshotsIDsMap); err != nil { + log.Error("disable fsr failed", zap.Error(err)) + } + } }() + + // Turn on FSR for TiKV data snapshots + if h.cfg.UseFSR { + snapshotsIDsMap, err = ec2Session.EnableDataFSR(h.metaInfo, h.cfg.TargetAZ) + if err != nil { + return nil, 0, errors.Trace(err) + } + } + volumeIDMap, err = ec2Session.CreateVolumes(h.metaInfo, string(h.cfg.VolumeType), h.cfg.VolumeIOPS, h.cfg.VolumeThroughput, h.cfg.TargetAZ) if err != nil {
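Editor's note: one subtlety in the restoreVolumes hunk above is ordering. The deferred cleanup that disables FSR is registered before EnableDataFSR runs, but because the closure captures the snapshotsIDsMap variable rather than its value, the cleanup still sees the map populated later. A hedged Go sketch of that shape (fsrSession is a stand-in for aws.EC2Session, with a simplified EnableDataFSR signature):

	package restoresketch

	import (
		"github.com/pingcap/log"
		"go.uber.org/zap"
	)

	// fsrSession stands in for the aws.EC2Session methods used in the diff.
	type fsrSession interface {
		EnableDataFSR(targetAZ string) (map[string][]*string, error)
		DisableDataFSR(map[string][]*string) error
	}

	func restoreWithFSR(sess fsrSession, useFSR bool, targetAZ string) (err error) {
		// registered first, runs last; captures snapshotsIDsMap by reference
		snapshotsIDsMap := make(map[string][]*string)
		defer func() {
			if useFSR {
				// FSR is only needed while volumes are created, so always turn it off
				if e := sess.DisableDataFSR(snapshotsIDsMap); e != nil {
					log.Error("disable fsr failed", zap.Error(e))
				}
			}
		}()
		if useFSR {
			// populated after the defer was registered, but still visible to it
			snapshotsIDsMap, err = sess.EnableDataFSR(targetAZ)
			if err != nil {
				return err
			}
		}
		// ... create volumes from the now FSR-enabled snapshots ...
		return nil
	}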
diff --git a/br/tests/br_pitr/incremental_data/delete_range.sql b/br/tests/br_pitr/incremental_data/delete_range.sql new file mode 100644 index 0000000000000..f5afde943649e --- /dev/null +++ b/br/tests/br_pitr/incremental_data/delete_range.sql @@ -0,0 +1,25 @@ +-- 1. Drop Schema +drop database db_to_be_dropped; +-- 2. Drop/Truncate Table +drop table table_to_be_dropped_or_truncated.t0_dropped; +drop table table_to_be_dropped_or_truncated.t1_dropped; +truncate table table_to_be_dropped_or_truncated.t0_truncated; +truncate table table_to_be_dropped_or_truncated.t1_truncated; +-- 3. Drop/Truncate Table Partition +alter table partition_to_be_dropped_or_truncated.t1_dropped drop partition p0; +alter table partition_to_be_dropped_or_truncated.t1_truncated truncate partition p0; +-- 4. Drop Table Index/PrimaryKey +alter table index_or_primarykey_to_be_dropped.t0 drop index k1; +alter table index_or_primarykey_to_be_dropped.t1 drop index k1; +alter table index_or_primarykey_to_be_dropped.t0 drop primary key; +alter table index_or_primarykey_to_be_dropped.t1 drop primary key; +-- 5. Drop Table Indexes +alter table indexes_to_be_dropped.t0 drop index k1, drop index k2; +alter table indexes_to_be_dropped.t1 drop index k1, drop index k2; +-- 6. Drop Table Column/Columns +alter table column_s_to_be_dropped.t0_column drop column name; +alter table column_s_to_be_dropped.t1_column drop column name; +alter table column_s_to_be_dropped.t0_columns drop column name, drop column c; +alter table column_s_to_be_dropped.t1_columns drop column name, drop column c; +-- 7. Modify Table Column +alter table column_to_be_modified.t0 modify column name varchar(25); diff --git a/br/tests/br_pitr/prepare_data/delete_range.sql b/br/tests/br_pitr/prepare_data/delete_range.sql new file mode 100644 index 0000000000000..e2a20be9e45fa --- /dev/null +++ b/br/tests/br_pitr/prepare_data/delete_range.sql @@ -0,0 +1,124 @@ +-- 1. Drop Schema +create database db_to_be_dropped; +create table db_to_be_dropped.t0(id int primary key, c int, name char(20)); +create table db_to_be_dropped.t1(id int primary key, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on db_to_be_dropped.t0 (name); +create index k2 on db_to_be_dropped.t0(c); +create index k1 on db_to_be_dropped.t1(name); +create index k2 on db_to_be_dropped.t1(c); +create index k3 on db_to_be_dropped.t1 (id, c); + +insert into db_to_be_dropped.t0 values (1, 2, "123"), (2, 3, "123"); +insert into db_to_be_dropped.t1 values (1, 2, "123"), (2, 3, "123"); +-- 2.
Drop/Truncate Table +create database table_to_be_dropped_or_truncated; +create table table_to_be_dropped_or_truncated.t0_dropped(id int primary key, c int, name char(20)); +create table table_to_be_dropped_or_truncated.t1_dropped(id int primary key, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); +create table table_to_be_dropped_or_truncated.t0_truncated(id int primary key, c int, name char(20)); +create table table_to_be_dropped_or_truncated.t1_truncated(id int primary key, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on table_to_be_dropped_or_truncated.t0_dropped (name); +create index k2 on table_to_be_dropped_or_truncated.t0_dropped (c); +create index k1 on table_to_be_dropped_or_truncated.t1_dropped (name); +create index k2 on table_to_be_dropped_or_truncated.t1_dropped (c); +create index k3 on table_to_be_dropped_or_truncated.t1_dropped (id, c); + +create index k1 on table_to_be_dropped_or_truncated.t0_truncated (name); +create index k2 on table_to_be_dropped_or_truncated.t0_truncated (c); +create index k1 on table_to_be_dropped_or_truncated.t1_truncated (name); +create index k2 on table_to_be_dropped_or_truncated.t1_truncated (c); +create index k3 on table_to_be_dropped_or_truncated.t1_truncated (id, c); + +insert into table_to_be_dropped_or_truncated.t0_dropped values (1, 2, "123"), (2, 3, "123"); +insert into table_to_be_dropped_or_truncated.t1_dropped values (1, 2, "123"), (2, 3, "123"); + +insert into table_to_be_dropped_or_truncated.t0_truncated values (1, 2, "123"), (2, 3, "123"); +insert into table_to_be_dropped_or_truncated.t1_truncated values (1, 2, "123"), (2, 3, "123"); + +-- 3. 
Drop/Truncate Table Partition +create database partition_to_be_dropped_or_truncated; +create table partition_to_be_dropped_or_truncated.t0_dropped(id int primary key, c int, name char(20)); +create table partition_to_be_dropped_or_truncated.t1_dropped(id int primary key, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); +create table partition_to_be_dropped_or_truncated.t0_truncated(id int primary key, c int, name char(20)); +create table partition_to_be_dropped_or_truncated.t1_truncated(id int primary key, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on partition_to_be_dropped_or_truncated.t0_dropped (name); +create index k2 on partition_to_be_dropped_or_truncated.t0_dropped (c); +create index k1 on partition_to_be_dropped_or_truncated.t1_dropped (name); +create index k2 on partition_to_be_dropped_or_truncated.t1_dropped (c); +create index k3 on partition_to_be_dropped_or_truncated.t1_dropped (id, c); + +create index k1 on partition_to_be_dropped_or_truncated.t0_truncated (name); +create index k2 on partition_to_be_dropped_or_truncated.t0_truncated (c); +create index k1 on partition_to_be_dropped_or_truncated.t1_truncated (name); +create index k2 on partition_to_be_dropped_or_truncated.t1_truncated (c); +create index k3 on partition_to_be_dropped_or_truncated.t1_truncated (id, c); + +insert into partition_to_be_dropped_or_truncated.t0_dropped values (1, 2, "123"), (2, 3, "123"); +insert into partition_to_be_dropped_or_truncated.t1_dropped values (1, 2, "123"), (2, 3, "123"); + +insert into partition_to_be_dropped_or_truncated.t0_truncated values (1, 2, "123"), (2, 3, "123"); +insert into partition_to_be_dropped_or_truncated.t1_truncated values (1, 2, "123"), (2, 3, "123"); +-- 4. Drop Table Index/PrimaryKey +create database index_or_primarykey_to_be_dropped; +create table index_or_primarykey_to_be_dropped.t0(id int primary key nonclustered, c int, name char(20)); +create table index_or_primarykey_to_be_dropped.t1(id int primary key nonclustered, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on index_or_primarykey_to_be_dropped.t0 (name); +create index k2 on index_or_primarykey_to_be_dropped.t0 (c); +create index k1 on index_or_primarykey_to_be_dropped.t1 (name); +create index k2 on index_or_primarykey_to_be_dropped.t1 (c); +create index k3 on index_or_primarykey_to_be_dropped.t1 (id, c); + +insert into index_or_primarykey_to_be_dropped.t0 values (1, 2, "123"), (2, 3, "123"); +insert into index_or_primarykey_to_be_dropped.t1 values (1, 2, "123"), (2, 3, "123"); +-- 5. 
Drop Table INDEXES +create database indexes_to_be_dropped; +create table indexes_to_be_dropped.t0(id int primary key nonclustered, c int, name char(20)); +create table indexes_to_be_dropped.t1(id int primary key nonclustered, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on indexes_to_be_dropped.t0 (name); +create index k2 on indexes_to_be_dropped.t0 (c); +create index k1 on indexes_to_be_dropped.t1 (name); +create index k2 on indexes_to_be_dropped.t1 (c); +create index k3 on indexes_to_be_dropped.t1 (id, c); + +insert into indexes_to_be_dropped.t0 values (1, 2, "123"), (2, 3, "123"); +insert into indexes_to_be_dropped.t1 values (1, 2, "123"), (2, 3, "123"); +-- 6. Drop Table Column/Columns +create database column_s_to_be_dropped; +create table column_s_to_be_dropped.t0_column(id int primary key nonclustered, c int, name char(20)); +create table column_s_to_be_dropped.t1_column(id int primary key nonclustered, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); +create table column_s_to_be_dropped.t0_columns(id int primary key nonclustered, c int, name char(20)); +create table column_s_to_be_dropped.t1_columns(id int primary key nonclustered, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on column_s_to_be_dropped.t0_column (name); +create index k2 on column_s_to_be_dropped.t0_column (c); +create index k1 on column_s_to_be_dropped.t1_column (name); +create index k2 on column_s_to_be_dropped.t1_column (c); +create index k3 on column_s_to_be_dropped.t1_column (id, c); + +create index k1 on column_s_to_be_dropped.t0_columns (name); +create index k2 on column_s_to_be_dropped.t0_columns (c); +create index k1 on column_s_to_be_dropped.t1_columns (name); +create index k2 on column_s_to_be_dropped.t1_columns (c); +-- create index k3 on column_s_to_be_dropped.t1_columns (id, c); + +insert into column_s_to_be_dropped.t0_column values (1, 2, "123"), (2, 3, "123"); +insert into column_s_to_be_dropped.t1_column values (1, 2, "123"), (2, 3, "123"); +insert into column_s_to_be_dropped.t0_columns values (1, 2, "123"), (2, 3, "123"); +insert into column_s_to_be_dropped.t1_columns values (1, 2, "123"), (2, 3, "123"); +-- 7. Modify Table Column +create database column_to_be_modified; +create table column_to_be_modified.t0(id int primary key nonclustered, c int, name char(20)); +create table column_to_be_modified.t1(id int primary key nonclustered, c int, name char(20)) PARTITION BY RANGE(id) ( PARTITION p0 VALUES LESS THAN (0), PARTITION p1 VALUES LESS THAN (10), PARTITION p2 VALUES LESS THAN MAXVALUE ); + +create index k1 on column_to_be_modified.t0 (name); +create index k2 on column_to_be_modified.t0 (c); +create index k1 on column_to_be_modified.t1 (name); +create index k2 on column_to_be_modified.t1 (c); +create index k3 on column_to_be_modified.t1 (id, c); + +insert into column_to_be_modified.t0 values (1, 2, "123"), (2, 3, "123"); +insert into column_to_be_modified.t1 values (1, 2, "123"), (2, 3, "123"); diff --git a/br/tests/br_pitr/run.sh b/br/tests/br_pitr/run.sh new file mode 100644 index 0000000000000..25a7fda5588f2 --- /dev/null +++ b/br/tests/br_pitr/run.sh @@ -0,0 +1,100 @@ +#!/bin/bash +# +# Copyright 2023 PingCAP, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eu +. run_services +CUR=$(cd `dirname $0`; pwd) + +# const value +PREFIX="pitr_backup" # NOTICE: don't start with 'br' because `restart services` would remove file/directory br*. +res_file="$TEST_DIR/sql_res.$TEST_NAME.txt" + +# start a new cluster +echo "restart services" +restart_services + +# prepare the data +echo "prepare the data" +run_sql_file $CUR/prepare_data/delete_range.sql +# ... + +# start the log backup task +echo "start log task" +run_br --pd $PD_ADDR log start --task-name integration_test -s "local://$TEST_DIR/$PREFIX/log" + +# run snapshot backup +echo "run snapshot backup" +run_br --pd $PD_ADDR backup full -s "local://$TEST_DIR/$PREFIX/full" + +# load the incremental data +echo "load the incremental data" +run_sql_file $CUR/incremental_data/delete_range.sql +# ... + +# wait checkpoint advance +echo "wait checkpoint advance" +sleep 10 +current_ts=$(echo $(($(date +%s%3N) << 18))) +echo "current ts: $current_ts" +i=0 +while true; do + # extract the checkpoint ts of the log backup task. If there is some error, the checkpoint ts should be empty + log_backup_status=$(unset BR_LOG_TO_TERM && run_br --pd $PD_ADDR log status --task-name integration_test --json 2>/dev/null) + echo "log backup status: $log_backup_status" + checkpoint_ts=$(echo "$log_backup_status" | head -n 1 | jq 'if .[0].last_errors | length == 0 then .[0].checkpoint else empty end') + echo "checkpoint ts: $checkpoint_ts" + + # check whether the checkpoint ts is a number + if [ $checkpoint_ts -gt 0 ] 2>/dev/null; then + # check whether the checkpoint has advanced + if [ $checkpoint_ts -gt $current_ts ]; then + echo "the checkpoint has advanced" + break + fi + # the checkpoint hasn't advanced + echo "the checkpoint hasn't advanced" + i=$((i+1)) + if [ "$i" -gt 50 ]; then + echo 'the checkpoint lag is too large' + exit 1 + fi + sleep 10 + else + # unknown status, maybe something is wrong + echo "TEST: [$TEST_NAME] failed to wait checkpoint advance!" + exit 1 + fi +done + +# dump some info from upstream cluster +# ...
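Editor's note: the `current_ts=$(($(date +%s%3N) << 18))` line above builds a TiDB TSO from wall-clock time so it can be compared with the task's checkpoint ts: TSOs pack physical milliseconds into the high bits and an 18-bit logical counter into the low bits. A small Go sketch of that layout (the helper names are ours, not a BR API):

	package main

	import (
		"fmt"
		"time"
	)

	// an 18-bit logical counter occupies the low bits of a TSO
	const logicalBits = 18

	// composeTS mirrors the script's `$(($(date +%s%3N) << 18))`:
	// physical milliseconds shifted high, logical counter zero.
	func composeTS(physicalMs int64) uint64 {
		return uint64(physicalMs) << logicalBits
	}

	// extractPhysical maps a checkpoint ts back to wall-clock time,
	// i.e. how far the log backup has actually advanced.
	func extractPhysical(ts uint64) time.Time {
		return time.UnixMilli(int64(ts >> logicalBits))
	}

	func main() {
		ts := composeTS(time.Now().UnixMilli())
		fmt.Println(ts, extractPhysical(ts))
	}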
+ +# start a new cluster +echo "restart services" +restart_services + +# PITR restore +echo "run pitr" +run_br --pd $PD_ADDR restore point -s "local://$TEST_DIR/$PREFIX/log" --full-backup-storage "local://$TEST_DIR/$PREFIX/full" > $res_file 2>&1 + +# check something in downstream cluster +echo "check br log" +check_contains "restore log success summary" +# check_not_contains "rewrite delete range" +echo "" > $res_file +echo "check sql result" +run_sql "select count(*) DELETE_RANGE_CNT from mysql.gc_delete_range group by ts order by DELETE_RANGE_CNT desc limit 1;" +check_contains "DELETE_RANGE_CNT: 46" diff --git a/br/tests/lightning_max_random/run.sh b/br/tests/lightning_max_random/run.sh index 193476ee04371..f28aa7f87e50e 100644 --- a/br/tests/lightning_max_random/run.sh +++ b/br/tests/lightning_max_random/run.sh @@ -44,6 +44,8 @@ cleanup() { cleanup +export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/backend/local/changeEpochVersion=1*return(-1)" + # auto_random_max = 2^{64-1-10}-1 # db.test contains key auto_random_max - 1 # db.test1 contains key auto_random_max @@ -63,4 +65,31 @@ check_contains 'ERROR' run_sql 'INSERT INTO db.test2(b) VALUES(33);' run_sql 'INSERT INTO db.test2(b) VALUES(44);' run_sql 'INSERT INTO db.test2(b) VALUES(55);' + +grep 'RequestTooOld' "$TEST_DIR/lightning.log" | grep -q 'needRescan' +cleanup + +export GO_FAILPOINTS="github.com/pingcap/tidb/br/pkg/lightning/backend/local/changeEpochVersion=1*return(10)" + +# auto_random_max = 2^{64-1-10}-1 +# db.test contains key auto_random_max - 1 +# db.test1 contains key auto_random_max +# db.test2 contains key auto_random_max + 1 (overflow) +run_lightning --sorted-kv-dir "$TEST_DIR/sst" --config "$CUR/config.toml" --log-file "$TEST_DIR/lightning.log" +check_result +# successfully insert: db.test auto_random key has not reached maximum +run_sql 'INSERT INTO db.test(b) VALUES(11);' +# fail for further insertion +run_sql 'INSERT INTO db.test(b) VALUES(22);' 2>&1 | tee -a "$TEST_DIR/sql_res.$TEST_NAME.txt" +check_contains 'ERROR' +# fail: db.test1 has key auto_random_max +run_sql 'INSERT INTO db.test1(b) VALUES(11);' +run_sql 'INSERT INTO db.test1(b) VALUES(22);' 2>&1 | tee -a "$TEST_DIR/sql_res.$TEST_NAME.txt" +check_contains 'ERROR' +# successfully insert for overflow key +run_sql 'INSERT INTO db.test2(b) VALUES(33);' +run_sql 'INSERT INTO db.test2(b) VALUES(44);' +run_sql 'INSERT INTO db.test2(b) VALUES(55);' + +grep 'RequestTooNew' "$TEST_DIR/lightning.log" | grep -q 'regionScanned' cleanup diff --git a/br/tests/run_group.sh b/br/tests/run_group.sh index 39068ab078427..8da15cab19a30 100755 --- a/br/tests/run_group.sh +++ b/br/tests/run_group.sh @@ -23,7 +23,7 @@ groups=( ["G00"]="br_300_small_tables br_backup_empty br_backup_version br_cache_table br_case_sensitive br_charset_gbk br_check_new_collocation_enable" ["G01"]="br_autoid br_crypter2 br_db br_db_online br_db_online_newkv br_db_skip br_debug_meta br_ebs br_foreign_key br_full" ["G02"]="br_full_cluster_restore br_full_ddl br_full_index br_gcs br_history" - ["G03"]='br_incompatible_tidb_config br_incremental br_incremental_ddl br_incremental_index' + ["G03"]='br_incompatible_tidb_config br_incremental br_incremental_ddl br_incremental_index br_pitr' ["G04"]='br_incremental_only_ddl br_incremental_same_table br_insert_after_restore br_key_locked br_log_test br_move_backup br_mv_index br_other br_partition_add_index' ["G05"]='br_range br_rawkv br_replica_read br_restore_TDE_enable br_restore_log_task_enable br_s3 br_shuffle_leader br_shuffle_region
br_single_table' ["G06"]='br_skip_checksum br_small_batch_size br_split_region_fail br_systables br_table_filter br_txn' diff --git a/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch b/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch index 304063a90cb27..db8e4120035a1 100644 --- a/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch +++ b/build/patches/com_github_grpc_ecosystem_grpc_gateway.patch @@ -63,13 +63,13 @@ index c4d18f624..41d5319fb 100644 - "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", - "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", - "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", -+ "@com_github_golang_protobuf//protoc-gen-go/descriptor:go_default_library", ++ "@com_github_golang_protobuf//protoc-gen-go/descriptor:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library", + "@com_github_golang_protobuf//ptypes/any:go_default_library", + "@com_github_golang_protobuf//ptypes/duration:go_default_library", + "@com_github_golang_protobuf//ptypes/timestamp:go_default_library", + "@com_github_golang_protobuf//ptypes/wrappers:go_default_library", -+ "@org_golang_google_genproto//googleapis/api/httpbody:go_default_library", ++ "@org_golang_google_genproto_googleapis_api//httpbody", + "@org_golang_google_genproto//protobuf/field_mask:go_default_library", "@org_golang_google_grpc//codes:go_default_library", "@org_golang_google_grpc//grpclog:go_default_library", diff --git a/cmd/tidb-server/main.go b/cmd/tidb-server/main.go index cd7da32550a4d..81908fd04a825 100644 --- a/cmd/tidb-server/main.go +++ b/cmd/tidb-server/main.go @@ -266,6 +266,7 @@ func main() { checkTempStorageQuota() } setupLog() + memory.InitMemoryHook() setupExtensions() setupStmtSummary() diff --git a/dumpling/OWNERS b/dumpling/OWNERS new file mode 100644 index 0000000000000..408c9e3b3f0ac --- /dev/null +++ b/dumpling/OWNERS @@ -0,0 +1,7 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-dumpling +labels: + - component/dumpling diff --git a/dumpling/export/config.go b/dumpling/export/config.go index f1dd093375f31..7b272207d6185 100644 --- a/dumpling/export/config.go +++ b/dumpling/export/config.go @@ -75,11 +75,47 @@ const ( flagReadTimeout = "read-timeout" flagTransactionalConsistency = "transactional-consistency" flagCompress = "compress" + flagCsvOutputDialect = "csv-output-dialect" // FlagHelp represents the help flag FlagHelp = "help" ) +// CSVDialect is the dialect of the CSV output, for compatibility with different import targets +type CSVDialect int + +const ( + // CSVDialectDefault is the default dialect, which is MySQL/MariaDB/TiDB etc. + CSVDialectDefault CSVDialect = iota + // CSVDialectSnowflake is the dialect of Snowflake + CSVDialectSnowflake + // CSVDialectRedshift is the dialect of Redshift + CSVDialectRedshift + // CSVDialectBase64 is a dialect that requires base64 binary format, only used for test now. + CSVDialectBase64 +) + +// BinaryFormat is the format of binary data +// Three standard formats are supported: UTF8, HEX and Base64 now. +type BinaryFormat int + +const ( + // BinaryFormatUTF8 is the default format, formats binary data as a UTF8 string + BinaryFormatUTF8 BinaryFormat = iota + // BinaryFormatHEX formats binary data as a HEX string, e.g. 12ABCD + BinaryFormatHEX + // BinaryFormatBase64 formats binary data as a Base64 string, e.g.
123qwer== + BinaryFormatBase64 +) + +// DialectBinaryFormatMap is the map of dialect and binary format +var DialectBinaryFormatMap = map[CSVDialect]BinaryFormat{ + CSVDialectDefault: BinaryFormatUTF8, + CSVDialectSnowflake: BinaryFormatHEX, + CSVDialectRedshift: BinaryFormatHEX, + CSVDialectBase64: BinaryFormatBase64, +} + // Config is the dump config for dumpling type Config struct { storage.BackendOptions @@ -142,6 +178,7 @@ type Config struct { SessionParams map[string]interface{} Tables DatabaseTables CollationCompatible string + CsvOutputDialect CSVDialect Labels prometheus.Labels `json:"-"` PromFactory promutil.Factory `json:"-"` @@ -198,6 +235,7 @@ func DefaultConfig() *Config { OutputFileTemplate: DefaultOutputFileTemplate, PosAfterConnect: false, CollationCompatible: LooseCollationCompatible, + CsvOutputDialect: CSVDialectDefault, SpecifiedTables: false, PromFactory: promutil.NewDefaultFactory(), PromRegistry: promutil.NewDefaultRegistry(), @@ -313,6 +351,7 @@ func (*Config) DefineFlags(flags *pflag.FlagSet) { flags.Bool(flagTransactionalConsistency, true, "Only support transactional consistency") _ = flags.MarkHidden(flagTransactionalConsistency) flags.StringP(flagCompress, "c", "", "Compress output file type, support 'gzip', 'snappy', 'zstd', 'no-compression' now") + flags.String(flagCsvOutputDialect, "", "The dialect of output CSV file, support 'snowflake', 'redshift' now") } // ParseFromFlags parses dumpling's export.Config from flags @@ -546,6 +585,18 @@ func (conf *Config) ParseFromFlags(flags *pflag.FlagSet) error { return errors.Trace(err) } + dialect, err := flags.GetString(flagCsvOutputDialect) + if err != nil { + return errors.Trace(err) + } + if dialect != "" && conf.FileType != "csv" { + return errors.Errorf("%s is only supported when dumping whole table to csv, not compatible with %s", flagCsvOutputDialect, conf.FileType) + } + conf.CsvOutputDialect, err = ParseOutputDialect(dialect) + if err != nil { + return errors.Trace(err) + } + for k, v := range params { conf.SessionParams[k] = v } @@ -630,6 +681,20 @@ func ParseCompressType(compressType string) (storage.CompressType, error) { } } +// ParseOutputDialect parses output dialect string to Dialect +func ParseOutputDialect(outputDialect string) (CSVDialect, error) { + switch outputDialect { + case "", "default": + return CSVDialectDefault, nil + case "snowflake": + return CSVDialectSnowflake, nil + case "redshift": + return CSVDialectRedshift, nil + default: + return CSVDialectDefault, errors.Errorf("unknown output dialect %s", outputDialect) + } +} + func (conf *Config) createExternalStorage(ctx context.Context) (storage.ExternalStorage, error) { if conf.ExtStorage != nil { return conf.ExtStorage, nil diff --git a/dumpling/export/sql_type.go b/dumpling/export/sql_type.go index 90dbc34cc23d3..e34728a0c173f 100644 --- a/dumpling/export/sql_type.go +++ b/dumpling/export/sql_type.go @@ -5,6 +5,7 @@ package export import ( "bytes" "database/sql" + "encoding/base64" "fmt" ) @@ -175,7 +176,7 @@ func SQLTypeNumberMaker() RowReceiverStringer { } // MakeRowReceiver constructs RowReceiverArr from column types -func MakeRowReceiver(colTypes []string) RowReceiverArr { +func MakeRowReceiver(colTypes []string) *RowReceiverArr { rowReceiverArr := make([]RowReceiverStringer, len(colTypes)) for i, colTp := range colTypes { recMaker, ok := colTypeRowReceiverMap[colTp] @@ -184,7 +185,7 @@ func MakeRowReceiver(colTypes []string) RowReceiverArr { } rowReceiverArr[i] = recMaker() } - return RowReceiverArr{ + return &RowReceiverArr{ bound: 
false, receivers: rowReceiverArr, } @@ -197,7 +198,7 @@ type RowReceiverArr struct { } // BindAddress implements RowReceiver.BindAddress -func (r RowReceiverArr) BindAddress(args []interface{}) { +func (r *RowReceiverArr) BindAddress(args []interface{}) { if r.bound { return } @@ -208,7 +209,7 @@ func (r RowReceiverArr) BindAddress(args []interface{}) { } // WriteToBuffer implements Stringer.WriteToBuffer -func (r RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, escapeBackslash bool) { +func (r *RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, escapeBackslash bool) { bf.WriteByte('(') for i, receiver := range r.receivers { receiver.WriteToBuffer(bf, escapeBackslash) @@ -220,7 +221,7 @@ func (r RowReceiverArr) WriteToBuffer(bf *bytes.Buffer, escapeBackslash bool) { } // WriteToBufferInCsv implements Stringer.WriteToBufferInCsv -func (r RowReceiverArr) WriteToBufferInCsv(bf *bytes.Buffer, escapeBackslash bool, opt *csvOption) { +func (r *RowReceiverArr) WriteToBufferInCsv(bf *bytes.Buffer, escapeBackslash bool, opt *csvOption) { for i, receiver := range r.receivers { receiver.WriteToBufferInCsv(bf, escapeBackslash, opt) if i != len(r.receivers)-1 { @@ -307,7 +308,14 @@ func (s *SQLTypeBytes) WriteToBuffer(bf *bytes.Buffer, _ bool) { func (s *SQLTypeBytes) WriteToBufferInCsv(bf *bytes.Buffer, escapeBackslash bool, opt *csvOption) { if s.RawBytes != nil { bf.Write(opt.delimiter) - escapeCSV(s.RawBytes, bf, escapeBackslash, opt) + switch opt.binaryFormat { + case BinaryFormatHEX: + fmt.Fprintf(bf, "%x", s.RawBytes) + case BinaryFormatBase64: + bf.WriteString(base64.StdEncoding.EncodeToString(s.RawBytes)) + default: + escapeCSV(s.RawBytes, bf, escapeBackslash, opt) + } bf.Write(opt.delimiter) } else { bf.WriteString(opt.nullValue) diff --git a/dumpling/export/writer.go b/dumpling/export/writer.go index 6e52f2384b787..20515b1cbd99a 100644 --- a/dumpling/export/writer.go +++ b/dumpling/export/writer.go @@ -308,6 +308,7 @@ type csvOption struct { separator []byte delimiter []byte lineTerminator []byte + binaryFormat BinaryFormat } func newOutputFileNamer(meta TableMeta, chunkIdx int, rows, fileSize bool) *outputFileNamer { diff --git a/dumpling/export/writer_serial_test.go b/dumpling/export/writer_serial_test.go index 586fa8e8d794d..0f80e14926dcc 100644 --- a/dumpling/export/writer_serial_test.go +++ b/dumpling/export/writer_serial_test.go @@ -245,6 +245,75 @@ func TestWriteInsertInCsvReturnsError(t *testing.T) { require.Equal(t, float64(0), ReadGauge(m.finishedSizeGauge)) } +func TestWriteInsertInCsvWithDialect(t *testing.T) { + cfg := createMockConfig() + + data := [][]driver.Value{ + {"1", "male", "bob@mail.com", "020-1234", "blob1"}, + {"2", "female", "sarah@mail.com", "020-1253", "blob2"}, + {"3", "male", "john@mail.com", "020-1256", "blob3"}, + {"4", "female", "sarah@mail.com", "020-1235", "blob4"}, + } + colTypes := []string{"INT", "SET", "VARCHAR", "VARCHAR", "BLOB"} + opt := &csvOption{separator: []byte(","), delimiter: []byte{'"'}, nullValue: "\\N", lineTerminator: []byte("\r\n")} + conf := configForWriteCSV(cfg, true, opt) + + { + // test UTF8 + conf.CsvOutputDialect = CSVDialectDefault + tableIR := newMockTableIR("test", "employee", data, nil, colTypes) + m := newMetrics(conf.PromFactory, conf.Labels) + bf := storage.NewBufferWriter() + n, err := WriteInsertInCsv(tcontext.Background(), conf, tableIR, tableIR, bf, m) + require.NoError(t, err) + require.Equal(t, uint64(4), n) + + expected := "1,\"male\",\"bob@mail.com\",\"020-1234\",\"blob1\"\r\n" + + 
"2,\"female\",\"sarah@mail.com\",\"020-1253\",\"blob2\"\r\n" + + "3,\"male\",\"john@mail.com\",\"020-1256\",\"blob3\"\r\n" + + "4,\"female\",\"sarah@mail.com\",\"020-1235\",\"blob4\"\r\n" + require.Equal(t, expected, bf.String()) + require.Equal(t, float64(4), ReadGauge(m.finishedRowsGauge)) + require.Equal(t, float64(185), ReadGauge(m.finishedSizeGauge)) + } + { + // test HEX + conf.CsvOutputDialect = CSVDialectRedshift + tableIR := newMockTableIR("test", "employee", data, nil, colTypes) + m := newMetrics(conf.PromFactory, conf.Labels) + bf := storage.NewBufferWriter() + n, err := WriteInsertInCsv(tcontext.Background(), conf, tableIR, tableIR, bf, m) + require.NoError(t, err) + require.Equal(t, uint64(4), n) + + expected := "1,\"male\",\"bob@mail.com\",\"020-1234\",\"626c6f6231\"\r\n" + + "2,\"female\",\"sarah@mail.com\",\"020-1253\",\"626c6f6232\"\r\n" + + "3,\"male\",\"john@mail.com\",\"020-1256\",\"626c6f6233\"\r\n" + + "4,\"female\",\"sarah@mail.com\",\"020-1235\",\"626c6f6234\"\r\n" + require.Equal(t, expected, bf.String()) + require.Equal(t, float64(4), ReadGauge(m.finishedRowsGauge)) + require.Equal(t, float64(205), ReadGauge(m.finishedSizeGauge)) + } + { + // test Base64 + conf.CsvOutputDialect = CSVDialectBase64 + tableIR := newMockTableIR("test", "employee", data, nil, colTypes) + m := newMetrics(conf.PromFactory, conf.Labels) + bf := storage.NewBufferWriter() + n, err := WriteInsertInCsv(tcontext.Background(), conf, tableIR, tableIR, bf, m) + require.NoError(t, err) + require.Equal(t, uint64(4), n) + + expected := "1,\"male\",\"bob@mail.com\",\"020-1234\",\"YmxvYjE=\"\r\n" + + "2,\"female\",\"sarah@mail.com\",\"020-1253\",\"YmxvYjI=\"\r\n" + + "3,\"male\",\"john@mail.com\",\"020-1256\",\"YmxvYjM=\"\r\n" + + "4,\"female\",\"sarah@mail.com\",\"020-1235\",\"YmxvYjQ=\"\r\n" + require.Equal(t, expected, bf.String()) + require.Equal(t, float64(4), ReadGauge(m.finishedRowsGauge)) + require.Equal(t, float64(197), ReadGauge(m.finishedSizeGauge)) + } +} + func TestSQLDataTypes(t *testing.T) { cfg := createMockConfig() diff --git a/dumpling/export/writer_util.go b/dumpling/export/writer_util.go index 1d4e328703336..23d70b3785a02 100644 --- a/dumpling/export/writer_util.go +++ b/dumpling/export/writer_util.go @@ -313,6 +313,7 @@ func WriteInsertInCsv( separator: []byte(cfg.CsvSeparator), delimiter: []byte(cfg.CsvDelimiter), lineTerminator: []byte(cfg.CsvLineTerminator), + binaryFormat: DialectBinaryFormatMap[cfg.CsvOutputDialect], } // use context.Background here to make sure writerPipe can deplete all the chunks in pipeline diff --git a/errors.toml b/errors.toml index 16cc4db60099e..35eef4500f3b2 100644 --- a/errors.toml +++ b/errors.toml @@ -1251,6 +1251,11 @@ error = ''' Check constraint '%s' refers to non-existing column '%s'. ''' +["ddl:3822"] +error = ''' +Duplicate check constraint name '%s'. +''' + ["ddl:3823"] error = ''' Column '%s' cannot be used in a check constraint '%s': needed in a foreign key constraint referential action. @@ -1871,6 +1876,16 @@ error = ''' BRIE Job %d not found ''' +["executor:8175"] +error = ''' +Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=%d] +''' + +["executor:8176"] +error = ''' +Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. 
Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.[conn=%d] +''' + ["executor:8212"] error = ''' Failed to split region ranges: %s @@ -3218,7 +3233,7 @@ Bad Number ["types:8030"] error = ''' -Cast to signed converted positive out-of-range integer to it's negative complement +Cast to signed converted positive out-of-range integer to its negative complement ''' ["types:8031"] diff --git a/go.mod b/go.mod index 59a0f1836ee1d..91068ae232b08 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/cockroachdb/errors v1.8.1 github.com/cockroachdb/pebble v0.0.0-20220415182917-06c9d3be25b3 github.com/coocood/freecache v1.2.1 - github.com/coreos/go-semver v0.3.0 + github.com/coreos/go-semver v0.3.1 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 github.com/daixiang0/gci v0.11.0 github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 @@ -53,7 +53,7 @@ require ( github.com/google/btree v1.1.2 github.com/google/pprof v0.0.0-20211122183932-1daafda22083 github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/gordonklaus/ineffassign v0.0.0-20230610083614-0e73809eb601 github.com/gorilla/mux v1.8.0 github.com/gostaticanalysis/forcetypeassert v0.1.0 @@ -86,8 +86,7 @@ require ( github.com/pingcap/sysutil v1.0.1-0.20230407040306-fb007c5aff21 github.com/pingcap/tidb/pkg/parser v0.0.0-20211011031125-9b13dc409c5e github.com/pingcap/tipb v0.0.0-20230919054518-dfd7d194838f - github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.16.0 + github.com/prometheus/client_golang v1.17.0 github.com/prometheus/client_model v0.5.0 github.com/prometheus/common v0.44.0 github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 @@ -103,7 +102,7 @@ require ( github.com/stretchr/testify v1.8.4 github.com/tdakkota/asciicheck v0.2.0 github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 - github.com/tikv/client-go/v2 v2.0.8-0.20231010061802-07432ef6c031 + github.com/tikv/client-go/v2 v2.0.8-0.20231030120815-1362f1e87566 github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 github.com/twmb/murmur3 v1.1.6 @@ -112,29 +111,29 @@ require ( github.com/wangjohn/quickselect v0.0.0-20161129230411-ed8402a42d5f github.com/xitongsys/parquet-go v1.5.5-0.20201110004701-b09c49d6d457 github.com/xitongsys/parquet-go-source v0.0.0-20200817004010-026bad9b25d0 - go.etcd.io/etcd/api/v3 v3.5.2 - go.etcd.io/etcd/client/pkg/v3 v3.5.2 - go.etcd.io/etcd/client/v3 v3.5.2 - go.etcd.io/etcd/server/v3 v3.5.2 - go.etcd.io/etcd/tests/v3 v3.5.2 + go.etcd.io/etcd/api/v3 v3.5.10 + go.etcd.io/etcd/client/pkg/v3 v3.5.10 + go.etcd.io/etcd/client/v3 v3.5.10 + go.etcd.io/etcd/server/v3 v3.5.10 + go.etcd.io/etcd/tests/v3 v3.5.10 go.opencensus.io v0.24.0 go.uber.org/atomic v1.11.0 go.uber.org/automaxprocs v1.5.3 go.uber.org/goleak v1.2.1 - go.uber.org/mock v0.2.0 + go.uber.org/mock v0.3.0 go.uber.org/multierr v1.11.0 go.uber.org/zap v1.26.0 golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 golang.org/x/net v0.17.0 - golang.org/x/oauth2 v0.8.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/sync v0.3.0 golang.org/x/sys v0.13.0 golang.org/x/term v0.13.0 golang.org/x/text v0.13.0 golang.org/x/time v0.3.0 golang.org/x/tools v0.10.0 - google.golang.org/api v0.114.0 - google.golang.org/grpc v1.54.0 + google.golang.org/api v0.128.0 + google.golang.org/grpc v1.59.0 gopkg.in/yaml.v2 v2.4.0 honnef.co/go/tools v0.4.5 
k8s.io/api v0.27.2 @@ -143,20 +142,28 @@ require ( ) require ( + github.com/cenkalti/backoff/v4 v4.1.1 // indirect github.com/dolthub/maphash v0.1.0 // indirect + github.com/golang-jwt/jwt/v4 v4.4.2 // indirect + github.com/google/s2a-go v0.1.4 // indirect github.com/jfcg/sixb v1.3.8 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/shabbyrobe/gocovmerge v0.0.0-20190829150210-3e036491d500 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect ) require ( - cloud.google.com/go v0.110.0 // indirect - cloud.google.com/go/compute v1.19.0 // indirect + cloud.google.com/go v0.110.8 // indirect + cloud.google.com/go/compute v1.23.1 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.13.0 // indirect - cloud.google.com/go/pubsub v1.30.0 // indirect + cloud.google.com/go/iam v1.1.3 // indirect + cloud.google.com/go/pubsub v1.33.0 // indirect github.com/Azure/azure-sdk-for-go/sdk/internal v1.3.0 // indirect github.com/Azure/go-ntlmssp v0.0.0-20221128193559-754e69321358 // indirect github.com/AzureAD/microsoft-authentication-library-for-go v0.5.1 // indirect @@ -179,16 +186,15 @@ require ( github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2 // indirect github.com/coocood/bbloom v0.0.0-20190830030839-58deb6228d64 // indirect github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 // indirect - github.com/coreos/go-systemd/v22 v22.3.2 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dustin/go-humanize v1.0.0 // indirect github.com/eapache/go-resiliency v1.2.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 // indirect github.com/eapache/queue v1.1.0 // indirect - github.com/fatih/structtag v1.2.0 // indirect + github.com/fatih/structtag v1.2.0 github.com/felixge/httpsnoop v1.0.2 // indirect - github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect github.com/go-asn1-ber/asn1-ber v1.5.4 // indirect github.com/go-kit/kit v0.9.0 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -196,14 +202,13 @@ require ( github.com/go-ole/go-ole v1.2.6 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/golang-jwt/jwt v3.2.1+incompatible // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/go-cmp v0.5.9 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/licensecheck v0.3.1 // indirect github.com/google/renameio/v2 v2.0.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect - github.com/googleapis/gax-go/v2 v2.7.1 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.4 // indirect + github.com/googleapis/gax-go/v2 v2.12.0 // indirect github.com/gorilla/handlers v1.5.1 // indirect github.com/gorilla/websocket v1.4.2 // indirect github.com/gostaticanalysis/analysisutil v0.7.1 
// indirect @@ -274,27 +279,22 @@ require ( github.com/uber/jaeger-lib v2.4.1+incompatible // indirect github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.etcd.io/etcd/client/v2 v2.305.2 // indirect - go.etcd.io/etcd/pkg/v3 v3.5.2 // indirect - go.etcd.io/etcd/raft/v3 v3.5.2 // indirect - go.opentelemetry.io/contrib v0.20.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect - go.opentelemetry.io/otel v0.20.0 // indirect - go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect - go.opentelemetry.io/otel/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect - go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect - go.opentelemetry.io/otel/trace v0.20.0 // indirect - go.opentelemetry.io/proto/otlp v0.7.0 // indirect + go.etcd.io/bbolt v1.3.8 // indirect + go.etcd.io/etcd/client/v2 v2.305.10 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.10 // indirect + go.etcd.io/etcd/raft/v3 v3.5.10 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 // indirect + go.opentelemetry.io/otel v1.0.1 // indirect + go.opentelemetry.io/otel/sdk v1.0.1 // indirect + go.opentelemetry.io/otel/trace v1.0.1 // indirect + go.opentelemetry.io/proto/otlp v0.9.0 // indirect golang.org/x/crypto v0.14.0 // indirect golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect golang.org/x/mod v0.11.0 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect gonum.org/v1/gonum v0.8.2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect + google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect google.golang.org/protobuf v1.31.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect diff --git a/go.sum b/go.sum index b94e413d9b828..0a8092b6a0c05 100644 --- a/go.sum +++ b/go.sum @@ -13,33 +13,30 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= -cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME= +cloud.google.com/go v0.110.8/go.mod h1:Iz8AkXJf1qmxC3Oxoep8R1T36w8B92yU29PcBhHO5fk= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.19.0 h1:+9zda3WGgW1ZSTlVppLCYFIr48Pa35q1uG2N1itbCEQ= -cloud.google.com/go/compute v1.19.0/go.mod h1:rikpw2y+UMidAe9tISo04EHNOIf42RLYF/q8Bs93scU= 
+cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0= +cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v0.13.0 h1:+CmB+K0J/33d0zSQ9SlFWUeCCEn5XJA0ZMZ3pHE9u8k= -cloud.google.com/go/iam v0.13.0/go.mod h1:ljOg+rcNfzZ5d6f1nAUJ8ZIxOaZUVoS14bKCtaLZ/D0= -cloud.google.com/go/kms v1.10.1 h1:7hm1bRqGCA1GBRQUrp831TwJ9TWhP+tvLuP497CQS2g= -cloud.google.com/go/kms v1.10.1/go.mod h1:rIWk/TryCkR59GMC3YtHtXeLzd634lBbKenvyySAyYI= -cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= -cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/iam v1.1.3 h1:18tKG7DzydKWUnLjonWcJO6wjSCAtzh4GcRKlH/Hrzc= +cloud.google.com/go/iam v1.1.3/go.mod h1:3khUlaBXfPKKe7huYgEpDn6FtgRyMEqbkvBxrQyY5SE= +cloud.google.com/go/kms v1.15.3 h1:RYsbxTRmk91ydKCzekI2YjryO4c5Y2M80Zwcs9/D/cI= +cloud.google.com/go/kms v1.15.3/go.mod h1:AJdXqHxS2GlPyduM99s9iGqi2nwbviBbhV/hdmt4iOQ= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.30.0 h1:vCge8m7aUKBJYOgrZp7EsNDf6QMd2CAlXZqWTn3yq6s= -cloud.google.com/go/pubsub v1.30.0/go.mod h1:qWi1OPS0B+b5L+Sg6Gmc9zD1Y+HaM0MdUr7LsupY1P4= +cloud.google.com/go/pubsub v1.33.0 h1:6SPCPvWav64tj0sVX/+npCBKhUi/UjJehy9op/V3p2g= +cloud.google.com/go/pubsub v1.33.0/go.mod h1:f+w71I33OMyxf9VpMVcZbnG5KSUkCOUHYpFd5U1GdRc= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= @@ -108,10 +105,7 @@ github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat6 github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/alecthomas/units 
v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexbrainman/sspi v0.0.0-20210105120005-909beea2cc74 h1:Kk6a4nehpJ3UuJRqlA3JxYxBZEqCeOmATOvrbT4p9RA= @@ -142,18 +136,14 @@ github.com/bazelbuild/buildtools v0.0.0-20230926111657-7d855c59baeb h1:4k69c5E7S github.com/bazelbuild/buildtools v0.0.0-20230926111657-7d855c59baeb/go.mod h1:689QdV3hBP7Vo9dJMmzhoYIyo/9iMhEmHkJcnaPRCbo= github.com/bazelbuild/rules_go v0.40.0 h1:i2HspGKiHMAnq2xIsp7sGJ7CiIlLlEKBtL1aogLJhEo= github.com/bazelbuild/rules_go v0.40.0/go.mod h1:TMHmtfpvyfsxaqfL9WnahCsXMWDMICTw7XeK9yVb+YU= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= -github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/biogo/store v0.0.0-20160505134755-913427a1d5e8/go.mod h1:Iev9Q3MErcn+w3UOJD/DkEzllvugfdx7bGcMOFhvr/4= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/blacktear23/go-proxyprotocol v1.0.6 h1:eTt6UMpEnq59NjON49b3Cay8Dm0sCs1nDliwgkyEsRM= github.com/blacktear23/go-proxyprotocol v1.0.6/go.mod h1:FSCbgnRZrQXazBLL5snfBbrcFSMtcmUDhSRb9OfFA1o= github.com/bmatcuk/doublestar/v2 v2.0.4 h1:6I6oUiT/sU27eE2OFcWqBhL1SwjyvQuOssxT4a1yidI= @@ -163,11 +153,11 @@ github.com/butuzov/mirror v1.1.0/go.mod h1:8Q0BdQU6rC6WILDiBM60DBfvV78OLJmMmixe7 github.com/carlmjohnson/flagext v0.21.0 h1:/c4uK3ie786Z7caXLcIMvePNSSiH3bQVGDvmGLMme60= github.com/carlmjohnson/flagext v0.21.0/go.mod h1:Eenv0epIUAr4NuedNmkzI8WmBmjIxZC239XcKxYS2ac= github.com/cenk/backoff v2.0.0+incompatible/go.mod h1:7FtoeaSnHoZnmZzz47cM35Y9nSW7tNyaidugnHTaFDE= +github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEexRPVJrBSOjoqN5VNOIKJ5Q3RViH6eu3puDRwx4= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -190,14 +180,20 @@ github.com/cloudfoundry/gosigar v1.3.6 h1:gIc08FbB3QPb+nAQhINIK/qhf5REKkY0FTGgRG 
github.com/cloudfoundry/gosigar v1.3.6/go.mod h1:lNWstu5g5gw59O09Y+wsMNFzBSnU8a0u+Sfx4dq360E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+gqO04wryn5h75LSazbRlnya1k= +github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cmux v0.0.0-20170110192607-30d10be49292/go.mod h1:qRiX68mZX1lGBkTWyp3CLcenw9I94W2dLeRvMzcn9N4= github.com/cockroachdb/cockroach v0.0.0-20170608034007-84bc9597164f/go.mod h1:xeT/CQ0qZHangbYbWShlCGAx31aV4AjGswDUjhKS6HQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/datadriven v1.0.0 h1:uhZrAfEayBecH2w2tZmhe20HJ7hDvrrA4x2Bg9YdZKM= github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= +github.com/cockroachdb/datadriven v1.0.2 h1:H9MtNqVoVhvd9nCBwOyDjUEdZCREqbIdCJD93PBm/jA= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= github.com/cockroachdb/errors v1.8.1 h1:A5+txlVZfOqFBDa4mGz2bUWSp0aHElvHX2bKkdbQu+Y= github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= @@ -218,24 +214,18 @@ github.com/coocood/freecache v1.2.1 h1:/v1CqMq45NFH9mp/Pt142reundeBM0dVUD3osQBeu github.com/coocood/freecache v1.2.1/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2 h1:NnLfQ77q0G4k2Of2c1ceQ0ec6MkLQyDp+IGdVM0D8XM= github.com/coocood/rtutil v0.0.0-20190304133409-c84515f646f2/go.mod h1:7qG7YFnOALvsx6tKTNmQot8d7cGFXM9TidzvRFLWYwM= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= -github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso= github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM= github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A= @@ -283,9 +273,12 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= +github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= @@ -303,9 +296,6 @@ github.com/felixge/httpsnoop v1.0.2 h1:+nS9g82KMXccJ/wp0zyRW9ZBHFETmMGtkk+2CTTrW github.com/felixge/httpsnoop v1.0.2/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod 
h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible h1:/l4kBbb4/vGSsdtB5nUe8L7B9mImVMaBPw9L/0TBHU8= -github.com/form3tech-oss/jwt-go v3.2.5+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/form3tech-oss/jwt-go v3.2.6-0.20210809144907-32ab6a8243d7+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= @@ -321,7 +311,6 @@ github.com/fsouza/fake-gcs-server v1.44.0/go.mod h1:M02aKoTv9Tnlf+gmWnTok1PWVCUH github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= github.com/getsentry/raven-go v0.1.2/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -338,10 +327,8 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -376,14 +363,13 @@ github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= github.com/golang-jwt/jwt v3.2.1+incompatible h1:73Z+4BJcrTC+KczS6WvTPvRGOp1WmfEP4Q1lOd9Z/+c= github.com/golang-jwt/jwt v3.2.1+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.2.0 h1:besgBTC8w8HjP6NzQdxwKH9Z5oQMZ24ThTrHp3cZ8eU= -github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= +github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= 
-github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -413,7 +399,6 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= @@ -435,7 +420,6 @@ github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21/go.mod h1:tf5+bz github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -479,18 +463,20 @@ github.com/google/pprof v0.0.0-20211122183932-1daafda22083/go.mod h1:KgnwoLYCZ8I github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio/v2 v2.0.0 h1:UifI23ZTGY8Tt29JbYFiuyIU3eX+RNFtUwefq9qAhxg= github.com/google/renameio/v2 v2.0.0/go.mod h1:BtmJXm5YlszgC+TD4HOEEUFgkJP3nLxehU6hfe7jRt4= +github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= +github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed h1:rZdD1GeRTHD1aG+VIvhQEYXurx6Wfg4QIT5YVl2tSC8= github.com/google/skylark v0.0.0-20181101142754-a5f7082aabed/go.mod h1:CKSX6SxHW1vp20ZNaeGe3TFFBIwCG6vaYrpAiOzX+NA= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy 
v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= -github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.2.4 h1:uGy6JWR/uMIILU8wbf+OkstIrNiMjGpEIyhx8f6W7s4= +github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= -github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= +github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -516,13 +502,11 @@ github.com/gostaticanalysis/forcetypeassert v0.1.0/go.mod h1:qZEedyP/sY1lTGV1uJ3 github.com/gostaticanalysis/testutil v0.3.1-0.20210208050101-bfb5c8eec0e4/go.mod h1:D+FIZ+7OahH3ePw/izIEeH5I06eKs1IKI4Xr64/Am3M= github.com/gostaticanalysis/testutil v0.4.0 h1:nhdCmubdmDF6VEatUNjgUZBJKWRqugoISdUv3PPQgHY= github.com/gostaticanalysis/testutil v0.4.0/go.mod h1:bLIoPefWXrRi/ssLFWX1dx7Repi5x3CuviD3dgAZaBU= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= @@ -619,15 +603,12 @@ github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877 h1:O7syWuYG github.com/johannesboyne/gofakes3 v0.0.0-20230506070712-04da935ef877/go.mod h1:AxgWC4DDX54O2WDoQO1Ceabtn6IbktjU/7bigor+66g= github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df h1:Zrb0IbuLOGHL7nrO2WrcuNWgDTlzFv3zY69QMx4ggQE= github.com/joho/sqltocsv v0.0.0-20210428211105-a6d6801d59df/go.mod h1:mAVCUAYtW9NG31eB30umMSLKcDt6mCUWSjoSn5qBh0k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= 
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= @@ -638,7 +619,6 @@ github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0b github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= @@ -665,7 +645,6 @@ github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -702,7 +681,6 @@ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2 github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0gWSiJswwQ2HqDmtX/ZCDJURnKUt1Ik= github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= @@ -750,7 +728,6 @@ github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= @@ -869,15 +846,12 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= @@ -885,16 +859,12 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI= github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY= github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64 h1:3DyLm+sTAJkfLyR/1pJ3L+fU2lFufWbpcgMFlGtqeyA= @@ -923,7 +893,6 @@ github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDN github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -962,8 +931,6 @@ github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd h1:ug7PpSOB5RBPK1K github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -984,7 +951,6 @@ github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= @@ -993,7 +959,6 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= 
github.com/spkg/bom v1.0.0 h1:S939THe0ukL5WcTGiGqkgtaW5JW+O6ITaIlpJXTYY64= github.com/spkg/bom v1.0.0/go.mod h1:lAz2VbTuYNcvs7iaFF8WW0ufXrHShJ7ck1fYFFbVXJs= github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4CcL/U= @@ -1016,7 +981,6 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tdakkota/asciicheck v0.2.0 h1:o8jvnUANo0qXtnslk2d3nMKTFNlOnJjRrNcj0j9qkHM= github.com/tdakkota/asciicheck v0.2.0/go.mod h1:Qb7Y9EgjCLJGup51gDHFzbI08/gbGhL/UVhYIPWG2rg= github.com/tenntenn/modver v1.0.1 h1:2klLppGhDgzJrScMpkj9Ujy3rXPUspSjAcev9tSEBgA= @@ -1027,8 +991,8 @@ github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2 h1:mbAskLJ0oJf github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2/go.mod h1:2PfKggNGDuadAa0LElHrByyrz4JPZ9fFx6Gs7nx7ZZU= github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a h1:J/YdBZ46WKpXsxsW93SG+q0F8KI+yFrcIDT4c/RNoc4= github.com/tiancaiamao/gp v0.0.0-20221230034425-4025bc8a4d4a/go.mod h1:h4xBhSNtOeEosLJ4P7JyKXX7Cabg7AVkWCK5gV2vOrM= -github.com/tikv/client-go/v2 v2.0.8-0.20231010061802-07432ef6c031 h1:+D3p4UBbL1V3vCZ7FtaW9mZgeGq3U14+VfcHyiYn1mU= -github.com/tikv/client-go/v2 v2.0.8-0.20231010061802-07432ef6c031/go.mod h1:AjaU1PM3aVQih/zsYvc5sE9z7dAMxV+e881GNeZGTck= +github.com/tikv/client-go/v2 v2.0.8-0.20231030120815-1362f1e87566 h1:ULv8/h2S2daBtNDoovptSBC5fJEBKrx0K7E1K8iVOSw= +github.com/tikv/client-go/v2 v2.0.8-0.20231030120815-1362f1e87566/go.mod h1:XiEHwWZfJqgafxW/VEgi1ltGWB9yjwCJBs2kW1xHMY4= github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb h1:hAcH9tFjQzQ3+ofrAHm4ajOTLliYCOfXpj3+boKOtac= github.com/tikv/pd/client v0.0.0-20230912103610-2f57a9f050eb/go.mod h1:E+6qtPu8fJm5kNjvKWPVFqSgNAFPk07y2EjD03GWzuI= github.com/timakin/bodyclose v0.0.0-20230421092635-574207250966 h1:quvGphlmUVU+nhpFa4gg4yJyTRJ13reZMDHrKwYw53M= @@ -1039,7 +1003,6 @@ github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7Am github.com/tklauser/numcpus v0.3.0/go.mod h1:yFGUr7TUHQRAhyqBcEg0Ge34zDBAsIvJJcyE6boqnA8= github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802 h1:uruHq4dN7GR16kFc5fp3d1RIYzJW5onx8Ybykw2YQFA= github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/twmb/murmur3 v1.1.6 h1:mqrRot1BRxm+Yct+vavLMou2/iJt0tNVTTC0QoIjaZg= @@ -1087,27 +1050,25 @@ github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5t github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod 
h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd/api/v3 v3.5.2 h1:tXok5yLlKyuQ/SXSjtqHc4uzNaMqZi2XsoSPr/LlJXI= -go.etcd.io/etcd/api/v3 v3.5.2/go.mod h1:5GB2vv4A4AOn3yk7MftYGHkUfGtDHnEraIjym4dYz5A= -go.etcd.io/etcd/client/pkg/v3 v3.5.2 h1:4hzqQ6hIb3blLyQ8usCU4h3NghkqcsohEQ3o3VetYxE= -go.etcd.io/etcd/client/pkg/v3 v3.5.2/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.2 h1:ymrVwTkefuqA/rPkSW7/B4ApijbPVefRumkY+stNfS0= -go.etcd.io/etcd/client/v2 v2.305.2/go.mod h1:2D7ZejHVMIfog1221iLSYlQRzrtECw3kz4I4VAQm3qI= -go.etcd.io/etcd/client/v3 v3.5.2 h1:WdnejrUtQC4nCxK0/dLTMqKOB+U5TP/2Ya0BJL+1otA= -go.etcd.io/etcd/client/v3 v3.5.2/go.mod h1:kOOaWFFgHygyT0WlSmL8TJiXmMysO/nNUlEsSsN6W4o= -go.etcd.io/etcd/etcdutl/v3 v3.5.2/go.mod h1:f+KEUNxRzqQGq1Y/SsaDN5cmlOGRWgfE3lXEDi5F1Ys= -go.etcd.io/etcd/pkg/v3 v3.5.2 h1:YZUojdoPhOyl5QILYnR8LTUbbNefu/sV4ma+ZMr2tto= -go.etcd.io/etcd/pkg/v3 v3.5.2/go.mod h1:zsXz+9D/kijzRiG/UnFGDTyHKcVp0orwiO8iMLAi+k0= -go.etcd.io/etcd/raft/v3 v3.5.2 h1:uCC37qOXqBvKqTGHGyhASsaCsnTuJugl1GvneJNwHWo= -go.etcd.io/etcd/raft/v3 v3.5.2/go.mod h1:G6pCP1sFgbjod7/KnEHY0vHUViqxjkdt6AiKsD0GRr8= -go.etcd.io/etcd/server/v3 v3.5.2 h1:B6ytJvS4Fmt8nkjzS2/8POf4tuPhFMluE0lWd4dx/7U= -go.etcd.io/etcd/server/v3 v3.5.2/go.mod h1:mlG8znIEz4N/28GABrohZCBM11FqgGVQcpbcyJgh0j0= -go.etcd.io/etcd/tests/v3 v3.5.2 h1:uk7/uMGVebpBDl+roivowHt6gJ5Fnqwik3syDkoSKdo= -go.etcd.io/etcd/tests/v3 v3.5.2/go.mod h1:Jdzbei4uFi9C3xDBfCwckRXjlX0UPooiP4g/zXgBMgQ= +go.etcd.io/bbolt v1.3.8 h1:xs88BrvEv273UsB79e0hcVrlUWmS0a8upikMFhSyAtA= +go.etcd.io/bbolt v1.3.8/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.10 h1:szRajuUUbLyppkhs9K6BRtjY37l66XQQmw7oZRANE4k= +go.etcd.io/etcd/api/v3 v3.5.10/go.mod h1:TidfmT4Uycad3NM/o25fG3J07odo4GBB9hoxaodFCtI= +go.etcd.io/etcd/client/pkg/v3 v3.5.10 h1:kfYIdQftBnbAq8pUWFXfpuuxFSKzlmM5cSn76JByiT0= +go.etcd.io/etcd/client/pkg/v3 v3.5.10/go.mod h1:DYivfIviIuQ8+/lCq4vcxuseg2P2XbHygkKwFo9fc8U= +go.etcd.io/etcd/client/v2 v2.305.10 h1:MrmRktzv/XF8CvtQt+P6wLUlURaNpSDJHFZhe//2QE4= +go.etcd.io/etcd/client/v2 v2.305.10/go.mod h1:m3CKZi69HzilhVqtPDcjhSGp+kA1OmbNn0qamH80xjA= +go.etcd.io/etcd/client/v3 v3.5.10 h1:W9TXNZ+oB3MCd/8UjxHTWK5J9Nquw9fQBLJd5ne5/Ao= +go.etcd.io/etcd/client/v3 v3.5.10/go.mod h1:RVeBnDz2PUEZqTpgqwAtUd8nAPf5kjyFyND7P1VkOKc= +go.etcd.io/etcd/pkg/v3 v3.5.10 h1:WPR8K0e9kWl1gAhB5A7gEa5ZBTNkT9NdNWrR8Qpo1CM= +go.etcd.io/etcd/pkg/v3 v3.5.10/go.mod h1:TKTuCKKcF1zxmfKWDkfz5qqYaE3JncKKZPFf8c1nFUs= +go.etcd.io/etcd/raft/v3 v3.5.10 h1:cgNAYe7xrsrn/5kXMSaH8kM/Ky8mAdMqGOxyYwpP0LA= +go.etcd.io/etcd/raft/v3 v3.5.10/go.mod h1:odD6kr8XQXTy9oQnyMPBOr0TVe+gT0neQhElQ6jbGRc= +go.etcd.io/etcd/server/v3 v3.5.10 h1:4NOGyOwD5sUZ22PiWYKmfxqoeh72z6EhYjNosKGLmZg= +go.etcd.io/etcd/server/v3 v3.5.10/go.mod h1:gBplPHfs6YI0L+RpGkTQO7buDbHv5HJGG/Bst0/zIPo= +go.etcd.io/etcd/tests/v3 v3.5.10 h1:F1pbXwKxwZ58aBT2+CSL/r8WUCAVhob0y1y8OVJ204s= +go.etcd.io/etcd/tests/v3 v3.5.10/go.mod h1:vVMWDv9OhopxfJCd+CMI4pih0zUDqlkJj6JcBNlUVXI= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1117,28 +1078,21 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io 
v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 h1:sO4WKdPAudZGKPcpZT4MJn6JaDmpyLrMPDGGyA1SttE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/proto/otlp v0.7.0 h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0 h1:Wx7nFnvCaissIUZxPkBqDz2963Z+Cl+PkYbDKzTxDqQ= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= +go.opentelemetry.io/otel v1.0.1 h1:4XKyXmfqJLOQ7feyV5DB6gsBFZ0ltB8vLtp6pj4JIcc= +go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1 h1:ofMbch7i29qIUf7VtF+r0HRF6ac0SBaPSziSsKp7wkk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.0.1/go.mod h1:Kv8liBeVNFkkkbilbgWRpV+wWuu+H5xdOT6HAgd30iw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1 h1:CFMFNoz+CGprjFAFy+RJFrfEe4GBia3RRm2a4fREvCA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.0.1/go.mod h1:xOvWoTOrQjxjW61xtOmD/WKGRYb/P4NzRo3bs65U6Rk= +go.opentelemetry.io/otel/sdk v1.0.1 h1:wXxFEWGo7XfXupPwVJvTBOaPBC9FEg0wB8hMNrKk+cA= +go.opentelemetry.io/otel/sdk v1.0.1/go.mod h1:HrdXne+BiwsOHYYkBE5ysIcv2bvdZstxzmCQhxTcZkI= +go.opentelemetry.io/otel/trace v1.0.1 h1:StTeIH6Q3G4r0Fiw34LTokUFESZgIDUr0qIJ7mKmAfw= +go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= 
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.9.0 h1:C0g6TWmQYvjKRnljRULLWUVJGy8Uvu0NEL/5frY2/t4= +go.opentelemetry.io/proto/otlp v0.9.0/go.mod h1:1vKfU9rv61e9EVGthD1zNvUbiwPcimSsOPU9brfSHJg= go.starlark.net v0.0.0-20210223155950-e043a3d3c984/go.mod h1:t3mmBBPzAVvK0L0n1drDmrQsJ8FoIx4INCqVMTr/Zo0= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= @@ -1154,8 +1108,8 @@ go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/mock v0.2.0 h1:TaP3xedm7JaAgScZO7tlvlKrqT0p7I6OsdGB5YNSMDU= -go.uber.org/mock v0.2.0/go.mod h1:J0y0rp9L3xiff1+ZBfKxlC1fz2+aO16tw0tsDOixfuM= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -1168,7 +1122,6 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.12.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= @@ -1186,10 +1139,10 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220518034528-6f7dac969898/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= @@ -1227,7 +1180,6 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod 
h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= @@ -1264,7 +1216,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1308,8 +1259,8 @@ golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1319,7 +1270,6 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1357,7 +1307,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1372,10 +1321,7 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1383,8 +1329,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816074244-15123e1e1f71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1423,9 +1369,9 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= 
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= @@ -1436,7 +1382,6 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -1467,7 +1412,6 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1499,7 +1443,6 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.1.1-0.20210205202024-ef80cdb6ec6d/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1-0.20210302220138-2ac05c832e1a/go.mod h1:9bzcO0MWcOuT0tm1iBGzDVPshzfwoVvREIui8C+MHqU= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= @@ -1541,8 +1484,8 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= -google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= +google.golang.org/api v0.128.0 h1:RjPESny5CnQRn9V6siglged+DZCgfu9l6mO9dkX9VOg= +google.golang.org/api v0.128.0/go.mod h1:Y611qgqaE92On/7g65MQgxYul3c0rEB894kniWLY750= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1584,9 +1527,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 h1:KpwkzHKEF7B9Zxg18WzOa7djJ+Ha5DzthMyZYQfEn2A= -google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1/go.mod h1:nKE/iIaLqn2bQwXBg8f1g2Ylh6r5MN5CmZvuzZCgsCU= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA= +google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/grpc v0.0.0-20180607172857-7a6a684ca69e/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -1607,10 +1553,12 @@ google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.54.0 h1:EhTqbhiYeixwWQtAEZAxmV9MGqcjEU2mFx52xCzNyag= -google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf 
v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1623,6 +1571,7 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= @@ -1640,7 +1589,6 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8 gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= @@ -1659,7 +1607,6 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1702,7 +1649,6 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h6 sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0 h1:ucqkfpjg9WzSUubAO62csmucvxl4/JeW3F4I4909XkM= diff --git a/pkg/bindinfo/BUILD.bazel b/pkg/bindinfo/BUILD.bazel index 333c75d7934bb..146db49d9036f 100644 --- a/pkg/bindinfo/BUILD.bazel +++ b/pkg/bindinfo/BUILD.bazel @@ -33,6 +33,7 @@ go_library( "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/parser", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "//pkg/util/stmtsummary/v2:stmtsummary", "//pkg/util/table-filter", diff --git a/pkg/bindinfo/handle.go b/pkg/bindinfo/handle.go index 5cfa1bf4f78ab..4c7ad623c1fda 100644 --- a/pkg/bindinfo/handle.go +++ b/pkg/bindinfo/handle.go @@ -40,6 +40,7 @@ import ( "github.com/pingcap/tidb/pkg/util/hint" "github.com/pingcap/tidb/pkg/util/logutil" utilparser 
"github.com/pingcap/tidb/pkg/util/parser" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" stmtsummaryv2 "github.com/pingcap/tidb/pkg/util/stmtsummary/v2" tablefilter "github.com/pingcap/tidb/pkg/util/table-filter" @@ -600,7 +601,7 @@ func (h *BindHandle) lockBindInfoTable() error { // LockBindInfoSQL simulates LOCK TABLE by updating a same row in each pessimistic transaction. func (*BindHandle) LockBindInfoSQL() string { - sql, err := sqlexec.EscapeSQL("UPDATE mysql.bind_info SET source= %? WHERE original_sql= %?", Builtin, BuiltinPseudoSQL4BindLock) + sql, err := sqlescape.EscapeSQL("UPDATE mysql.bind_info SET source= %? WHERE original_sql= %?", Builtin, BuiltinPseudoSQL4BindLock) if err != nil { return "" } diff --git a/pkg/ddl/BUILD.bazel b/pkg/ddl/BUILD.bazel index d5fff640768c2..383611545501f 100644 --- a/pkg/ddl/BUILD.bazel +++ b/pkg/ddl/BUILD.bazel @@ -16,9 +16,9 @@ go_library( "backfilling_dispatcher.go", "backfilling_dist_scheduler.go", "backfilling_import_cloud.go", - "backfilling_import_local.go", "backfilling_merge_sort.go", "backfilling_operators.go", + "backfilling_proto.go", "backfilling_read_index.go", "backfilling_scheduler.go", "callback.go", @@ -128,7 +128,7 @@ go_library( "//pkg/util/codec", "//pkg/util/collate", "//pkg/util/dbterror", - "//pkg/util/disttask", + "//pkg/util/dbterror/exeerrors", "//pkg/util/domainutil", "//pkg/util/filter", "//pkg/util/gcutil", @@ -146,6 +146,7 @@ go_library( "//pkg/util/size", "//pkg/util/slice", "//pkg/util/sqlexec", + "//pkg/util/sqlkiller", "//pkg/util/stringutil", "//pkg/util/syncutil", "//pkg/util/timeutil", @@ -237,6 +238,7 @@ go_test( flaky = True, shard_count = 50, deps = [ + "//br/pkg/lightning/backend/external", "//pkg/autoid_service", "//pkg/config", "//pkg/ddl/copr", @@ -248,7 +250,9 @@ go_test( "//pkg/ddl/testutil", "//pkg/ddl/util", "//pkg/ddl/util/callback", + "//pkg/disttask/framework/dispatcher", "//pkg/disttask/framework/proto", + "//pkg/disttask/framework/storage", "//pkg/domain", "//pkg/domain/infosync", "//pkg/errno", diff --git a/pkg/ddl/OWNERS b/pkg/ddl/OWNERS new file mode 100644 index 0000000000000..75489727655a6 --- /dev/null +++ b/pkg/ddl/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-ddl diff --git a/pkg/ddl/backfilling_dispatcher.go b/pkg/ddl/backfilling_dispatcher.go index e90d4ba854250..598c8cceb3491 100644 --- a/pkg/ddl/backfilling_dispatcher.go +++ b/pkg/ddl/backfilling_dispatcher.go @@ -34,85 +34,111 @@ import ( "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/helper" "github.com/pingcap/tidb/pkg/table" - disttaskutil "github.com/pingcap/tidb/pkg/util/disttask" - "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" ) -type backfillingDispatcherExt struct { - d *ddl - previousSchedulerIDs []string +// BackfillingDispatcherExt is an extension of litBackfillDispatcher, exported for test. +type BackfillingDispatcherExt struct { + d *ddl + GlobalSort bool } -var _ dispatcher.Extension = (*backfillingDispatcherExt)(nil) - -// NewBackfillingDispatcherExt creates a new backfillingDispatcherExt. +// NewBackfillingDispatcherExt creates a new backfillingDispatcherExt, only used for test now. 
func NewBackfillingDispatcherExt(d DDL) (dispatcher.Extension, error) { ddl, ok := d.(*ddl) if !ok { return nil, errors.New("The getDDL result should be the type of *ddl") } - return &backfillingDispatcherExt{ + return &BackfillingDispatcherExt{ d: ddl, }, nil } +var _ dispatcher.Extension = (*BackfillingDispatcherExt)(nil) + // OnTick implements dispatcher.Extension interface. -func (*backfillingDispatcherExt) OnTick(_ context.Context, _ *proto.Task) { +func (*BackfillingDispatcherExt) OnTick(_ context.Context, _ *proto.Task) { } // OnNextSubtasksBatch generates a batch of the next step's subtask plans. -func (h *backfillingDispatcherExt) OnNextSubtasksBatch( +func (dsp *BackfillingDispatcherExt) OnNextSubtasksBatch( ctx context.Context, taskHandle dispatcher.TaskHandle, gTask *proto.Task, - step proto.Step, + nextStep proto.Step, ) (taskMeta [][]byte, err error) { - var gTaskMeta BackfillGlobalMeta - if err := json.Unmarshal(gTask.Meta, &gTaskMeta); err != nil { + logger := logutil.BgLogger().With( + zap.Stringer("type", gTask.Type), + zap.Int64("task-id", gTask.ID), + zap.String("curr-step", StepStr(gTask.Step)), + zap.String("next-step", StepStr(nextStep)), + ) + var backfillMeta BackfillGlobalMeta + if err := json.Unmarshal(gTask.Meta, &backfillMeta); err != nil { return nil, err } - - job := &gTaskMeta.Job - useExtStore := len(gTaskMeta.CloudStorageURI) > 0 - - tblInfo, err := getTblInfo(h.d, job) + job := &backfillMeta.Job + tblInfo, err := getTblInfo(dsp.d, job) if err != nil { return nil, err } + logger.Info("on next subtasks batch") - // StepOne: read index and write to backend. - // StepTwo: do merge sort to reduce the global sort reader reads files count. Only used in global sort. - // StepThree: ingest data. // TODO: use planner. - switch step { - case proto.StepOne: + switch nextStep { + case StepReadIndex: if tblInfo.Partition != nil { return generatePartitionPlan(tblInfo) } - return generateNonPartitionPlan(h.d, tblInfo, job) - case proto.StepTwo: - gTaskMeta.UseMergeSort = true - if err := updateMeta(gTask, &gTaskMeta); err != nil { + is, err := dsp.GetEligibleInstances(ctx, gTask) + if err != nil { return nil, err } - return generateMergePlan(taskHandle, gTask) - case proto.StepThree: - if useExtStore { - prevStep := proto.StepOne - if gTaskMeta.UseMergeSort { - prevStep = proto.StepTwo + instanceCnt := len(is) + return generateNonPartitionPlan(dsp.d, tblInfo, job, dsp.GlobalSort, instanceCnt) + case StepMergeSort: + res, err := generateMergePlan(taskHandle, gTask, logger) + if err != nil { + return nil, err + } + if len(res) > 0 { + backfillMeta.UseMergeSort = true + if err := updateMeta(gTask, &backfillMeta); err != nil { + return nil, err } - return generateGlobalSortIngestPlan(ctx, taskHandle, gTask, job.ID, gTaskMeta.CloudStorageURI, prevStep) } - if tblInfo.Partition != nil { - return nil, nil + return res, nil + case StepWriteAndIngest: + if dsp.GlobalSort { + prevStep := StepReadIndex + if backfillMeta.UseMergeSort { + prevStep = StepMergeSort + } + + failpoint.Inject("mockWriteIngest", func() { + m := &BackfillSubTaskMeta{ + SortedKVMeta: external.SortedKVMeta{}, + } + metaBytes, _ := json.Marshal(m) + metaArr := make([][]byte, 0, 16) + metaArr = append(metaArr, metaBytes) + failpoint.Return(metaArr, nil) + }) + return generateGlobalSortIngestPlan( + ctx, + taskHandle, + gTask, + job.ID, + backfillMeta.CloudStorageURI, + prevStep, + logger) } - return generateIngestTaskPlan(ctx, h, taskHandle, gTask) + return nil, nil default: return nil, nil } @@ -127,55 +153,21 @@ func
updateMeta(gTask *proto.Task, taskMeta *BackfillGlobalMeta) error { return nil } -func (*backfillingDispatcherExt) GetNextStep( - taskHandle dispatcher.TaskHandle, - task *proto.Task, -) proto.Step { +// GetNextStep implements dispatcher.Extension interface. +func (dsp *BackfillingDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: - return proto.StepOne - case proto.StepOne: - // when in tests - if taskHandle == nil { - return proto.StepThree - } - - var meta BackfillGlobalMeta - if err := json.Unmarshal(task.Meta, &meta); err != nil { - logutil.BgLogger().Info( - "unmarshal task meta met error", - zap.String("category", "ddl"), - zap.Error(err)) - } - // don't need merge step in local backend. - if len(meta.CloudStorageURI) == 0 { - return proto.StepThree - } - - // if data files overlaps too much, we need a merge step. - subTaskMetas, err := taskHandle.GetPreviousSubtaskMetas(task.ID, proto.StepInit) - if err != nil { - // TODO(lance6716): should we return error? - return proto.StepThree + return StepReadIndex + case StepReadIndex: + if dsp.GlobalSort { + return StepMergeSort } - multiStats := make([]external.MultipleFilesStat, 0, 100) - for _, bs := range subTaskMetas { - var subtask BackfillSubTaskMeta - err = json.Unmarshal(bs, &subtask) - if err != nil { - // TODO(lance6716): should we return error? - return proto.StepThree - } - multiStats = append(multiStats, subtask.MultipleFilesStats...) - } - if skipMergeSort(multiStats) { - return proto.StepThree - } - return proto.StepTwo - case proto.StepTwo: - return proto.StepThree + return proto.StepDone + case StepMergeSort: + return StepWriteAndIngest + case StepWriteAndIngest: + return proto.StepDone default: - // current step should be proto.StepThree return proto.StepDone } } @@ -188,48 +180,64 @@ func skipMergeSort(stats []external.MultipleFilesStat) bool { } // OnErrStage generates the error handling stage's plan. -func (*backfillingDispatcherExt) OnErrStage(_ context.Context, _ dispatcher.TaskHandle, task *proto.Task, receiveErr []error) (meta []byte, err error) { +func (*BackfillingDispatcherExt) OnErrStage(_ context.Context, _ dispatcher.TaskHandle, task *proto.Task, receiveErrs []error) (meta []byte, err error) { // We do not need extra meta info when rolling back - firstErr := receiveErr[0] + logger := logutil.BgLogger().With( + zap.Stringer("type", task.Type), + zap.Int64("task-id", task.ID), + zap.String("step", StepStr(task.Step)), + ) + logger.Info("on error stage", zap.Errors("errors", receiveErrs)) + firstErr := receiveErrs[0] task.Error = firstErr return nil, nil } -func (h *backfillingDispatcherExt) GetEligibleInstances(ctx context.Context, _ *proto.Task) ([]*infosync.ServerInfo, error) { +// GetEligibleInstances implements dispatcher.Extension interface. +func (*BackfillingDispatcherExt) GetEligibleInstances(ctx context.Context, _ *proto.Task) ([]*infosync.ServerInfo, error) { serverInfos, err := dispatcher.GenerateSchedulerNodes(ctx) if err != nil { return nil, err } - if len(h.previousSchedulerIDs) > 0 { - // Only the nodes that executed step one can have step two. - involvedServerInfos := make([]*infosync.ServerInfo, 0, len(serverInfos)) - for _, id := range h.previousSchedulerIDs { - if idx := disttaskutil.FindServerInfo(serverInfos, id); idx >= 0 { - involvedServerInfos = append(involvedServerInfos, serverInfos[idx]) - } - } - return involvedServerInfos, nil - } return serverInfos, nil } // IsRetryableErr implements dispatcher.Extension.IsRetryableErr interface.
-func (*backfillingDispatcherExt) IsRetryableErr(error) bool { +func (*BackfillingDispatcherExt) IsRetryableErr(error) bool { return true } -type litBackfillDispatcher struct { +// LitBackfillDispatcher wraps BaseDispatcher. +type LitBackfillDispatcher struct { *dispatcher.BaseDispatcher + d *ddl } -func newLitBackfillDispatcher(ctx context.Context, taskMgr dispatcher.TaskManager, - serverID string, task *proto.Task, handle dispatcher.Extension) dispatcher.Dispatcher { - dis := litBackfillDispatcher{ +func newLitBackfillDispatcher(ctx context.Context, d *ddl, taskMgr dispatcher.TaskManager, + serverID string, task *proto.Task) dispatcher.Dispatcher { + dsp := LitBackfillDispatcher{ + d: d, BaseDispatcher: dispatcher.NewBaseDispatcher(ctx, taskMgr, serverID, task), } - dis.BaseDispatcher.Extension = handle - return &dis + return &dsp +} + +// Init implements BaseDispatcher interface. +func (dsp *LitBackfillDispatcher) Init() (err error) { + taskMeta := &BackfillGlobalMeta{} + if err = json.Unmarshal(dsp.BaseDispatcher.Task.Meta, taskMeta); err != nil { + return errors.Annotate(err, "unmarshal task meta failed") + } + dsp.BaseDispatcher.Extension = &BackfillingDispatcherExt{ + d: dsp.d, + GlobalSort: len(taskMeta.CloudStorageURI) > 0} + return dsp.BaseDispatcher.Init() +} + +// Close implements BaseDispatcher interface. +func (dsp *LitBackfillDispatcher) Close() { + dsp.BaseDispatcher.Close() } func getTblInfo(d *ddl, job *model.Job) (tblInfo *model.TableInfo, err error) { @@ -267,7 +275,8 @@ func generatePartitionPlan(tblInfo *model.TableInfo) (metas [][]byte, err error) return subTaskMetas, nil } -func generateNonPartitionPlan(d *ddl, tblInfo *model.TableInfo, job *model.Job) (metas [][]byte, err error) { +func generateNonPartitionPlan( + d *ddl, tblInfo *model.TableInfo, job *model.Job, useCloud bool, instanceCnt int) (metas [][]byte, err error) { tbl, err := getTable(d.store, job.SchemaID, tblInfo) if err != nil { return nil, err @@ -290,8 +299,15 @@ func generateNonPartitionPlan(d *ddl, tblInfo *model.TableInfo, job *model.Job) return nil, err } - subTaskMetas := make([][]byte, 0, 100) - regionBatch := 20 + regionBatch := 100 + if !useCloud { + // Make subtask large enough to reduce the overhead of local/global flush. + quota := variable.DDLDiskQuota.Load() + regionBatch = int(int64(quota) / int64(config.SplitRegionSize)) + } + regionBatch = min(regionBatch, len(recordRegionMetas)/instanceCnt) + + subTaskMetas := make([][]byte, 0, 4) sort.Slice(recordRegionMetas, func(i, j int) bool { return bytes.Compare(recordRegionMetas[i].StartKey(), recordRegionMetas[j].StartKey()) < 0 }) @@ -301,7 +317,12 @@ func generateNonPartitionPlan(d *ddl, tblInfo *model.TableInfo, job *model.Job) end = len(recordRegionMetas) } batch := recordRegionMetas[i:end] - subTaskMeta := &BackfillSubTaskMeta{StartKey: batch[0].StartKey(), EndKey: batch[len(batch)-1].EndKey()} + subTaskMeta := &BackfillSubTaskMeta{ + SortedKVMeta: external.SortedKVMeta{ + StartKey: batch[0].StartKey(), + EndKey: batch[len(batch)-1].EndKey(), + }, + } if i == 0 { subTaskMeta.StartKey = startKey } @@ -317,42 +338,6 @@ func generateNonPartitionPlan(d *ddl, tblInfo *model.TableInfo, job *model.Job) return subTaskMetas, nil } -func generateIngestTaskPlan( - ctx context.Context, - h *backfillingDispatcherExt, - taskHandle dispatcher.TaskHandle, - gTask *proto.Task, -) ([][]byte, error) { - // We dispatch dummy subtasks because the rest data in local engine will be imported - // in the initialization of subtask executor. 
- var ingestSubtaskCnt int - if intest.InTest && taskHandle == nil { - serverNodes, err := dispatcher.GenerateSchedulerNodes(ctx) - if err != nil { - return nil, err - } - ingestSubtaskCnt = len(serverNodes) - } else { - schedulerIDs, err := taskHandle.GetPreviousSchedulerIDs(ctx, gTask.ID, gTask.Step) - if err != nil { - return nil, err - } - h.previousSchedulerIDs = schedulerIDs - ingestSubtaskCnt = len(schedulerIDs) - } - - subTaskMetas := make([][]byte, 0, ingestSubtaskCnt) - dummyMeta := &BackfillSubTaskMeta{} - metaBytes, err := json.Marshal(dummyMeta) - if err != nil { - return nil, err - } - for i := 0; i < ingestSubtaskCnt; i++ { - subTaskMetas = append(subTaskMetas, metaBytes) - } - return subTaskMetas, nil -} - func generateGlobalSortIngestPlan( ctx context.Context, taskHandle dispatcher.TaskHandle, @@ -360,8 +345,9 @@ func generateGlobalSortIngestPlan( jobID int64, cloudStorageURI string, step proto.Step, + logger *zap.Logger, ) ([][]byte, error) { - firstKey, lastKey, totalSize, dataFiles, statFiles, err := getSummaryFromLastStep(taskHandle, task.ID, step) + startKeyFromSumm, endKeyFromSumm, totalSize, dataFiles, statFiles, err := getSummaryFromLastStep(taskHandle, task.ID, step) if err != nil { return nil, err } @@ -370,19 +356,19 @@ func generateGlobalSortIngestPlan( return nil, err } splitter, err := getRangeSplitter( - ctx, cloudStorageURI, jobID, int64(totalSize), int64(len(instanceIDs)), dataFiles, statFiles) + ctx, cloudStorageURI, jobID, int64(totalSize), int64(len(instanceIDs)), dataFiles, statFiles, logger) if err != nil { return nil, err } defer func() { err := splitter.Close() if err != nil { - logutil.Logger(ctx).Error("failed to close range splitter", zap.Error(err)) + logger.Error("failed to close range splitter", zap.Error(err)) } }() metaArr := make([][]byte, 0, 16) - startKey := firstKey + startKey := startKeyFromSumm var endKey kv.Key for { endKeyOfGroup, dataFiles, statFiles, rangeSplitKeys, err := splitter.SplitOneRangesGroup() @@ -390,11 +376,11 @@ func generateGlobalSortIngestPlan( return nil, err } if len(endKeyOfGroup) == 0 { - endKey = lastKey.Next() + endKey = endKeyFromSumm } else { endKey = kv.Key(endKeyOfGroup).Clone() } - logutil.Logger(ctx).Info("split subtask range", + logger.Info("split subtask range", zap.String("startKey", hex.EncodeToString(startKey)), zap.String("endKey", hex.EncodeToString(endKey))) if startKey.Cmp(endKey) >= 0 { @@ -403,12 +389,12 @@ func generateGlobalSortIngestPlan( } m := &BackfillSubTaskMeta{ SortedKVMeta: external.SortedKVMeta{ - MinKey: startKey, - MaxKey: endKey, - DataFiles: dataFiles, - StatFiles: statFiles, + StartKey: startKey, + EndKey: endKey, TotalKVSize: totalSize / uint64(len(instanceIDs)), }, + DataFiles: dataFiles, + StatFiles: statFiles, RangeSplitKeys: rangeSplitKeys, } metaBytes, err := json.Marshal(m) @@ -426,8 +412,30 @@ func generateMergePlan( taskHandle dispatcher.TaskHandle, task *proto.Task, + logger *zap.Logger, ) ([][]byte, error) { - _, _, _, dataFiles, _, err := getSummaryFromLastStep(taskHandle, task.ID, proto.StepOne) + // check whether the data files overlap; + // if they overlap too much, we need a merge step.
+ subTaskMetas, err := taskHandle.GetPreviousSubtaskMetas(task.ID, StepReadIndex) + if err != nil { + return nil, err + } + multiStats := make([]external.MultipleFilesStat, 0, 100) + for _, bs := range subTaskMetas { + var subtask BackfillSubTaskMeta + err = json.Unmarshal(bs, &subtask) + if err != nil { + return nil, err + } + multiStats = append(multiStats, subtask.MultipleFilesStats...) + } + if skipMergeSort(multiStats) { + logger.Info("skip merge sort") + return nil, nil + } + + // generate merge sort plan. + _, _, _, dataFiles, _, err := getSummaryFromLastStep(taskHandle, task.ID, StepReadIndex) if err != nil { return nil, err } @@ -441,9 +449,7 @@ func generateMergePlan( end = len(dataFiles) } m := &BackfillSubTaskMeta{ - SortedKVMeta: external.SortedKVMeta{ - DataFiles: dataFiles[start:end], - }, + DataFiles: dataFiles[start:end], } metaBytes, err := json.Marshal(m) if err != nil { @@ -463,6 +469,7 @@ func getRangeSplitter( totalSize int64, instanceCnt int64, dataFiles, statFiles []string, + logger *zap.Logger, ) (*external.RangeSplitter, error) { backend, err := storage.ParseBackend(cloudStorageURI, nil) if err != nil { @@ -486,25 +493,24 @@ func getRangeSplitter( } maxSizePerRange, maxKeysPerRange, err := local.GetRegionSplitSizeKeys(ctx) if err != nil { - logutil.Logger(ctx).Warn("fail to get region split keys and size", zap.Error(err)) + logger.Warn("fail to get region split keys and size", zap.Error(err)) } maxSizePerRange = max(maxSizePerRange, int64(config.SplitRegionSize)) maxKeysPerRange = max(maxKeysPerRange, int64(config.SplitRegionKeys)) return external.NewRangeSplitter(ctx, dataFiles, statFiles, extStore, - rangeGroupSize, rangeGroupKeys, maxSizePerRange, maxKeysPerRange) + rangeGroupSize, rangeGroupKeys, maxSizePerRange, maxKeysPerRange, true) } func getSummaryFromLastStep( taskHandle dispatcher.TaskHandle, gTaskID int64, step proto.Step, -) (min, max kv.Key, totalKVSize uint64, dataFiles, statFiles []string, err error) { +) (startKey, endKey kv.Key, totalKVSize uint64, dataFiles, statFiles []string, err error) { subTaskMetas, err := taskHandle.GetPreviousSubtaskMetas(gTaskID, step) if err != nil { return nil, nil, 0, nil, nil, errors.Trace(err) } - var minKey, maxKey kv.Key allDataFiles := make([]string, 0, 16) allStatFiles := make([]string, 0, 16) for _, subTaskMeta := range subTaskMetas { @@ -513,10 +519,22 @@ func getSummaryFromLastStep( if err != nil { return nil, nil, 0, nil, nil, errors.Trace(err) } - // Skip empty subtask.MinKey/MaxKey because it means + // Skip empty subtask.StartKey/EndKey because it means // no records need to be written in this subtask. - minKey = external.NotNilMin(minKey, subtask.MinKey) - maxKey = external.NotNilMax(maxKey, subtask.MaxKey) + if subtask.StartKey == nil || subtask.EndKey == nil { + continue + } + + if len(startKey) == 0 { + startKey = subtask.StartKey + } else { + startKey = external.BytesMin(startKey, subtask.StartKey) + } + if len(endKey) == 0 { + endKey = subtask.EndKey + } else { + endKey = external.BytesMax(endKey, subtask.EndKey) + } totalKVSize += subtask.TotalKVSize for _, stat := range subtask.MultipleFilesStats { @@ -526,5 +544,23 @@ func getSummaryFromLastStep( } } } - return minKey, maxKey, totalKVSize, allDataFiles, allStatFiles, nil + return startKey, endKey, totalKVSize, allDataFiles, allStatFiles, nil +} + +// StepStr converts a proto.Step to its string name.
+func StepStr(step proto.Step) string { + switch step { + case proto.StepInit: + return "init" + case StepReadIndex: + return "read-index" + case StepMergeSort: + return "merge-sort" + case StepWriteAndIngest: + return "write&ingest" + case proto.StepDone: + return "done" + default: + return "unknown" + } } diff --git a/pkg/ddl/backfilling_dispatcher_test.go b/pkg/ddl/backfilling_dispatcher_test.go index d3001f7e6365c..87ed8bc88fac0 100644 --- a/pkg/ddl/backfilling_dispatcher_test.go +++ b/pkg/ddl/backfilling_dispatcher_test.go @@ -20,18 +20,34 @@ import ( "testing" "time" + "github.com/ngaut/pools" "github.com/pingcap/errors" + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/br/pkg/lightning/backend/external" "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/disttask/framework/dispatcher" "github.com/pingcap/tidb/pkg/disttask/framework/proto" + "github.com/pingcap/tidb/pkg/disttask/framework/storage" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/meta" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/logutil" "github.com/stretchr/testify/require" + "github.com/tikv/client-go/v2/util" + "go.uber.org/zap" ) -func TestBackfillingDispatcher(t *testing.T) { +func TestBackfillingDispatcherLocalMode(t *testing.T) { + /// test str + require.Equal(t, "init", ddl.StepStr(proto.StepInit)) + require.Equal(t, "read-index", ddl.StepStr(ddl.StepReadIndex)) + require.Equal(t, "merge-sort", ddl.StepStr(ddl.StepMergeSort)) + require.Equal(t, "write&ingest", ddl.StepStr(ddl.StepWriteAndIngest)) + require.Equal(t, "done", ddl.StepStr(proto.StepDone)) + require.Equal(t, "unknown", ddl.StepStr(111)) + store, dom := testkit.CreateMockStoreAndDomain(t) dsp, err := ddl.NewBackfillingDispatcherExt(dom.DDL()) require.NoError(t, err) @@ -44,14 +60,14 @@ func TestBackfillingDispatcher(t *testing.T) { "PARTITION p1 VALUES LESS THAN (100),\n" + "PARTITION p2 VALUES LESS THAN (1000),\n" + "PARTITION p3 VALUES LESS THAN MAXVALUE\n);") - gTask := createAddIndexGlobalTask(t, dom, "test", "tp1", proto.Backfill) + gTask := createAddIndexGlobalTask(t, dom, "test", "tp1", proto.Backfill, false) tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("tp1")) require.NoError(t, err) tblInfo := tbl.Meta() // 1.1 OnNextSubtasksBatch - gTask.Step = dsp.GetNextStep(nil, gTask) - require.Equal(t, proto.StepOne, gTask.Step) + gTask.Step = dsp.GetNextStep(gTask) + require.Equal(t, ddl.StepReadIndex, gTask.Step) metas, err := dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) require.NoError(t, err) require.Equal(t, len(tblInfo.Partition.Definitions), len(metas)) @@ -61,15 +77,9 @@ func TestBackfillingDispatcher(t *testing.T) { require.Equal(t, par.ID, subTask.PhysicalTableID) } - // 1.2 test partition table OnNextSubtasksBatch after StepInit finished. + // 1.2 test partition table OnNextSubtasksBatch after StepReadIndex gTask.State = proto.TaskStateRunning - gTask.Step = dsp.GetNextStep(nil, gTask) - require.Equal(t, proto.StepThree, gTask.Step) - // for partition table, we will not generate subtask for StepThree. 
- metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) - require.NoError(t, err) - require.Len(t, metas, 0) - gTask.Step = dsp.GetNextStep(nil, gTask) + gTask.Step = dsp.GetNextStep(gTask) require.Equal(t, proto.StepDone, gTask.Step) metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) require.NoError(t, err) @@ -87,7 +97,7 @@ func TestBackfillingDispatcher(t *testing.T) { /// 2. test non partition table. // 2.1 empty table tk.MustExec("create table t1(id int primary key, v int)") - gTask = createAddIndexGlobalTask(t, dom, "test", "t1", proto.Backfill) + gTask = createAddIndexGlobalTask(t, dom, "test", "t1", proto.Backfill, false) metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) require.NoError(t, err) require.Equal(t, 0, len(metas)) @@ -97,28 +107,174 @@ func TestBackfillingDispatcher(t *testing.T) { tk.MustExec("insert into t2 values (), (), (), (), (), ()") tk.MustExec("insert into t2 values (), (), (), (), (), ()") tk.MustExec("insert into t2 values (), (), (), (), (), ()") - gTask = createAddIndexGlobalTask(t, dom, "test", "t2", proto.Backfill) + gTask = createAddIndexGlobalTask(t, dom, "test", "t2", proto.Backfill, false) // 2.2.1 stepInit - gTask.Step = dsp.GetNextStep(nil, gTask) + gTask.Step = dsp.GetNextStep(gTask) metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) require.NoError(t, err) require.Equal(t, 1, len(metas)) - require.Equal(t, proto.StepOne, gTask.Step) - // 2.2.2 stepOne + require.Equal(t, ddl.StepReadIndex, gTask.Step) + // 2.2.2 StepReadIndex gTask.State = proto.TaskStateRunning - gTask.Step = dsp.GetNextStep(nil, gTask) - require.Equal(t, proto.StepThree, gTask.Step) - metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) - require.NoError(t, err) - require.Equal(t, 1, len(metas)) - gTask.Step = dsp.GetNextStep(nil, gTask) + gTask.Step = dsp.GetNextStep(gTask) require.Equal(t, proto.StepDone, gTask.Step) metas, err = dsp.OnNextSubtasksBatch(context.Background(), nil, gTask, gTask.Step) require.NoError(t, err) require.Equal(t, 0, len(metas)) } -func createAddIndexGlobalTask(t *testing.T, dom *domain.Domain, dbName, tblName string, taskType proto.TaskType) *proto.Task { +func TestBackfillingDispatcherGlobalSortMode(t *testing.T) { + // init test env. 
+ store, dom := testkit.CreateMockStoreAndDomain(t) + tk := testkit.NewTestKit(t, store) + pool := pools.NewResourcePool(func() (pools.Resource, error) { + return tk.Session(), nil + }, 1, 1, time.Second) + defer pool.Close() + ctx := context.WithValue(context.Background(), "etcd", true) + mgr := storage.NewTaskManager(util.WithInternalSourceType(ctx, "taskManager"), pool) + storage.SetTaskManager(mgr) + dspManager, err := dispatcher.NewManager(util.WithInternalSourceType(ctx, "dispatcher"), mgr, "host:port") + require.NoError(t, err) + + tk.MustExec("use test") + tk.MustExec("create table t1(id bigint auto_random primary key)") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + task := createAddIndexGlobalTask(t, dom, "test", "t1", proto.Backfill, true) + + dsp := dspManager.MockDispatcher(task) + ext, err := ddl.NewBackfillingDispatcherExt(dom.DDL()) + require.NoError(t, err) + ext.(*ddl.BackfillingDispatcherExt).GlobalSort = true + dsp.Extension = ext + + taskID, err := mgr.AddNewGlobalTask(task.Key, proto.Backfill, 1, task.Meta) + require.NoError(t, err) + task.ID = taskID + + // 1. to read-index stage + subtaskMetas, err := dsp.OnNextSubtasksBatch(ctx, dsp, task, dsp.GetNextStep(task)) + require.NoError(t, err) + require.Len(t, subtaskMetas, 1) + task.Step = ext.GetNextStep(task) + require.Equal(t, ddl.StepReadIndex, task.Step) + // update task/subtask, and finish subtask, so we can go to next stage + subtasks := make([]*proto.Subtask, 0, len(subtaskMetas)) + for _, m := range subtaskMetas { + subtasks = append(subtasks, proto.NewSubtask(task.Step, task.ID, task.Type, "", m)) + } + _, err = mgr.UpdateGlobalTaskAndAddSubTasks(task, subtasks, proto.TaskStatePending) + require.NoError(t, err) + gotSubtasks, err := mgr.GetSubtasksForImportInto(taskID, ddl.StepReadIndex) + require.NoError(t, err) + logutil.BgLogger().Info("got read-index subtasks", zap.Any("len", len(gotSubtasks))) + + // update meta, same as import into.
+ subtasks = make([]*proto.Subtask, 0, len(subtaskMetas)) + for _, m := range subtaskMetas { + subtasks = append(subtasks, proto.NewSubtask(task.Step, task.ID, task.Type, "", m)) + } + _, err = mgr.UpdateGlobalTaskAndAddSubTasks(task, subtasks, proto.TaskStatePending) + require.NoError(t, err) + gotSubtasks, err = mgr.GetSubtasksForImportInto(taskID, task.Step) + require.NoError(t, err) + mergeSortStepMeta := &ddl.BackfillSubTaskMeta{ + SortedKVMeta: external.SortedKVMeta{ + StartKey: []byte("ta"), + EndKey: []byte("tc"), + TotalKVSize: 12, + MultipleFilesStats: []external.MultipleFilesStat{ + { + Filenames: [][2]string{ + {"gs://sort-bucket/data/1", "gs://sort-bucket/data/1.stat"}, + }, + }, + }, + }, + } + mergeSortStepMetaBytes, err := json.Marshal(mergeSortStepMeta) + require.NoError(t, err) + for _, s := range gotSubtasks { + require.NoError(t, mgr.FinishSubtask(s.ID, mergeSortStepMetaBytes)) + } + // 3. to write&ingest stage. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/mockWriteIngest", "return(true)")) + t.Cleanup(func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockWriteIngest")) + }) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, dsp, task, ext.GetNextStep(task)) + require.NoError(t, err) + require.Len(t, subtaskMetas, 1) + task.Step = ext.GetNextStep(task) + require.Equal(t, ddl.StepWriteAndIngest, task.Step) + // 4. to done stage. + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, dsp, task, ext.GetNextStep(task)) + require.NoError(t, err) + require.Len(t, subtaskMetas, 0) + task.Step = ext.GetNextStep(task) + require.Equal(t, proto.StepDone, task.Step) +} + +func TestGetNextStep(t *testing.T) { + task := &proto.Task{ + Step: proto.StepInit, + } + ext := &ddl.BackfillingDispatcherExt{} + + // 1. local mode + for _, nextStep := range []proto.Step{ddl.StepReadIndex, proto.StepDone} { + require.Equal(t, nextStep, ext.GetNextStep(task)) + task.Step = nextStep + } + // 2. global sort mode + ext = &ddl.BackfillingDispatcherExt{GlobalSort: true} + task.Step = proto.StepInit + for _, nextStep := range []proto.Step{ddl.StepReadIndex, ddl.StepMergeSort, ddl.StepWriteAndIngest} { + require.Equal(t, nextStep, ext.GetNextStep(task)) + task.Step = nextStep + } +} + +func createAddIndexGlobalTask(t *testing.T, + dom *domain.Domain, + dbName, + tblName string, + taskType proto.TaskType, + useGlobalSort bool) *proto.Task { db, ok := dom.InfoSchema().SchemaByName(model.NewCIStr(dbName)) require.True(t, ok) tbl, err := dom.InfoSchema().TableByName(model.NewCIStr(dbName), model.NewCIStr(tblName)) @@ -142,6 +298,9 @@ func createAddIndexGlobalTask(t *testing.T, dom *domain.Domain, dbName, tblName EleIDs: []int64{10}, EleTypeKey: meta.IndexElementKey, } + if useGlobalSort { + taskMeta.CloudStorageURI = "gs://sort-bucket" + } gTaskMetaBytes, err := json.Marshal(taskMeta) require.NoError(t, err) diff --git a/pkg/ddl/backfilling_dist_scheduler.go b/pkg/ddl/backfilling_dist_scheduler.go index bf825d3d3ed14..8971cb5440c09 100644 --- a/pkg/ddl/backfilling_dist_scheduler.go +++ b/pkg/ddl/backfilling_dist_scheduler.go @@ -47,11 +47,11 @@ type BackfillGlobalMeta struct { // BackfillSubTaskMeta is the sub-task meta for backfilling index. 
type BackfillSubTaskMeta struct { - PhysicalTableID int64 `json:"physical_table_id"` - StartKey []byte `json:"start_key"` - EndKey []byte `json:"end_key"` + PhysicalTableID int64 `json:"physical_table_id"` RangeSplitKeys [][]byte `json:"range_split_keys"` + DataFiles []string `json:"data-files"` + StatFiles []string `json:"stat-files"` external.SortedKVMeta `json:",inline"` } @@ -93,7 +93,7 @@ func NewBackfillSubtaskExecutor(_ context.Context, taskMeta []byte, d *ddl, if len(bgm.CloudStorageURI) > 0 { return newCloudImportExecutor(&bgm.Job, jobMeta.ID, indexInfos[0], tbl.(table.PhysicalTable), bc, bgm.CloudStorageURI) } - return newImportFromLocalStepExecutor(jobMeta.ID, indexInfos, tbl.(table.PhysicalTable), bc), nil + return nil, errors.Errorf("local import does not have write & ingest step") default: return nil, errors.Errorf("unknown step %d for job %d", stage, jobMeta.ID) } diff --git a/pkg/ddl/backfilling_import_cloud.go b/pkg/ddl/backfilling_import_cloud.go index 546ffdd972def..b5f0e70749ec7 100644 --- a/pkg/ddl/backfilling_import_cloud.go +++ b/pkg/ddl/backfilling_import_cloud.go @@ -83,11 +83,12 @@ func (m *cloudImportExecutor) RunSubtask(ctx context.Context, subtask *proto.Sub StorageURI: m.cloudStoreURI, DataFiles: sm.DataFiles, StatFiles: sm.StatFiles, - MinKey: sm.MinKey, - MaxKey: sm.MaxKey, + StartKey: sm.StartKey, + EndKey: sm.EndKey, SplitKeys: sm.RangeSplitKeys, TotalFileSize: int64(sm.TotalKVSize), TotalKVCount: 0, + CheckHotspot: true, }, }, engineUUID) if err != nil { diff --git a/pkg/ddl/backfilling_import_local.go b/pkg/ddl/backfilling_import_local.go deleted file mode 100644 index 032f249261f94..0000000000000 --- a/pkg/ddl/backfilling_import_local.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package ddl - -import ( - "context" - - "github.com/pingcap/tidb/br/pkg/lightning/common" - "github.com/pingcap/tidb/pkg/ddl/ingest" - "github.com/pingcap/tidb/pkg/disttask/framework/proto" - "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/table" - "github.com/pingcap/tidb/pkg/util/logutil" -) - -type localImportExecutor struct { - jobID int64 - indexes []*model.IndexInfo - ptbl table.PhysicalTable - bc ingest.BackendCtx -} - -func newImportFromLocalStepExecutor( - jobID int64, - indexes []*model.IndexInfo, - ptbl table.PhysicalTable, - bc ingest.BackendCtx, -) *localImportExecutor { - return &localImportExecutor{ - jobID: jobID, - indexes: indexes, - ptbl: ptbl, - bc: bc, - } -} - -func (i *localImportExecutor) Init(ctx context.Context) error { - logutil.Logger(ctx).Info("local import executor init subtask exec env") - for _, index := range i.indexes { - _, _, err := i.bc.Flush(index.ID, ingest.FlushModeForceGlobal) - if err != nil { - if common.ErrFoundDuplicateKeys.Equal(err) { - err = convertToKeyExistsErr(err, index, i.ptbl.Meta()) - return err - } - } - } - return nil -} - -func (*localImportExecutor) RunSubtask(ctx context.Context, _ *proto.Subtask) error { - logutil.Logger(ctx).Info("local import executor run subtask") - return nil -} - -func (*localImportExecutor) Cleanup(ctx context.Context) error { - logutil.Logger(ctx).Info("local import executor cleanup subtask exec env") - return nil -} - -func (*localImportExecutor) OnFinished(ctx context.Context, _ *proto.Subtask) error { - logutil.Logger(ctx).Info("local import executor finish subtask") - return nil -} - -func (*localImportExecutor) Rollback(ctx context.Context) error { - logutil.Logger(ctx).Info("local import executor rollback subtask") - return nil -} diff --git a/pkg/ddl/backfilling_merge_sort.go b/pkg/ddl/backfilling_merge_sort.go index c51314f9e351f..32d27d7146a06 100644 --- a/pkg/ddl/backfilling_merge_sort.go +++ b/pkg/ddl/backfilling_merge_sort.go @@ -21,13 +21,13 @@ import ( "strconv" "sync" - "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/lightning/backend/external" "github.com/pingcap/tidb/br/pkg/storage" "github.com/pingcap/tidb/pkg/ddl/ingest" "github.com/pingcap/tidb/pkg/disttask/framework/proto" "github.com/pingcap/tidb/pkg/parser/model" + "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" @@ -78,9 +78,9 @@ func (m *mergeSortExecutor) RunSubtask(ctx context.Context, subtask *proto.Subta return err } - m.mu.Lock() m.subtaskSortedKVMeta = &external.SortedKVMeta{} onClose := func(summary *external.WriterSummary) { + m.mu.Lock() m.subtaskSortedKVMeta.MergeSummary(summary) m.mu.Unlock() } @@ -98,22 +98,12 @@ func (m *mergeSortExecutor) RunSubtask(ctx context.Context, subtask *proto.Subta return err } - writerID := uuid.New().String() prefix := path.Join(strconv.Itoa(int(m.jobID)), strconv.Itoa(int(subtask.ID))) // TODO: config generated by plan. 
- return external.MergeOverlappingFiles( - ctx, - sm.DataFiles, - store, - 64*1024, - prefix, - writerID, - 256*size.MB, - 8*1024, - 1*size.MB, - 8*1024, - onClose) + return external.MergeOverlappingFiles(ctx, sm.DataFiles, store, 64*1024, prefix, + external.DefaultBlockSize, 8*1024, 1*size.MB, 8*1024, onClose, + int(variable.GetDDLReorgWorkerCounter()), true) } func (*mergeSortExecutor) Cleanup(ctx context.Context) error { diff --git a/pkg/ddl/backfilling_operators.go b/pkg/ddl/backfilling_operators.go index f900cc229dccb..6d6190c2d1ff1 100644 --- a/pkg/ddl/backfilling_operators.go +++ b/pkg/ddl/backfilling_operators.go @@ -36,6 +36,7 @@ import ( util2 "github.com/pingcap/tidb/pkg/ddl/util" "github.com/pingcap/tidb/pkg/disttask/operator" "github.com/pingcap/tidb/pkg/kv" + "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/resourcemanager/pool/workerpool" @@ -45,6 +46,7 @@ import ( "github.com/pingcap/tidb/pkg/table" "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/tablecodec" + tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" @@ -402,6 +404,13 @@ type tableScanWorker struct { } func (w *tableScanWorker) HandleTask(task TableScanTask, sender func(IndexRecordChunk)) { + defer tidbutil.Recover(metrics.LblAddIndex, "handleTableScanTaskWithRecover", func() { + w.ctx.onError(errors.New("met panic in tableScanWorker")) + }, false) + + failpoint.Inject("injectPanicForTableScan", func() { + panic("mock panic") + }) if w.se == nil { sessCtx, err := w.sessPool.Get() if err != nil { @@ -604,6 +613,14 @@ func (w *indexIngestWorker) HandleTask(rs IndexRecordChunk, send func(IndexWrite w.srcChunkPool <- rs.Chunk } }() + defer tidbutil.Recover(metrics.LblAddIndex, "handleIndexIngestTaskWithRecover", func() { + w.ctx.onError(errors.New("met panic in indexIngestWorker")) + }, false) + + failpoint.Inject("injectPanicForIndexIngest", func() { + panic("mock panic") + }) + result := IndexWriteResult{ ID: rs.ID, } @@ -734,13 +751,9 @@ func (s *indexWriteResultSink) flush() error { failpoint.Inject("mockFlushError", func(_ failpoint.Value) { failpoint.Return(errors.New("mock flush error")) }) - flushMode := ingest.FlushModeForceLocalAndCheckDiskQuota - if s.tbl.GetPartitionedTable() != nil { - flushMode = ingest.FlushModeForceGlobal - } for _, index := range s.indexes { idxInfo := index.Meta() - _, _, err := s.backendCtx.Flush(idxInfo.ID, flushMode) + _, _, err := s.backendCtx.Flush(idxInfo.ID, ingest.FlushModeForceGlobal) if err != nil { if common.ErrFoundDuplicateKeys.Equal(err) { err = convertToKeyExistsErr(err, idxInfo, s.tbl.Meta()) diff --git a/pkg/ddl/backfilling_proto.go b/pkg/ddl/backfilling_proto.go new file mode 100644 index 0000000000000..488124ba62ffe --- /dev/null +++ b/pkg/ddl/backfilling_proto.go @@ -0,0 +1,36 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ddl + +import "github.com/pingcap/tidb/pkg/disttask/framework/proto" + +// Steps of Add Index; each step is represented by one or multiple subtasks. +// The initial step is StepInit(-1). +// Steps are processed in the following order: +// - local sort: +// StepInit -> StepReadIndex -> StepDone +// - global sort: +// StepInit -> StepReadIndex -> StepMergeSort -> StepWriteAndIngest -> StepDone +const ( + StepReadIndex proto.Step = 1 + // StepMergeSort is only used in global sort. It merges the sorted KV files on cloud storage, + // so we get better read performance during StepWriteAndIngest. Whether it runs + // depends on how much the KV files overlap: when they overlap less than + // MergeSortOverlapThreshold, no subtasks are generated. + StepMergeSort proto.Step = 2 + + // StepWriteAndIngest writes the sorted KV into TiKV and ingests it. + StepWriteAndIngest proto.Step = 3 +) diff --git a/pkg/ddl/backfilling_read_index.go b/pkg/ddl/backfilling_read_index.go index 6bc7ee52979ac..1f6427ee2f9c2 100644 --- a/pkg/ddl/backfilling_read_index.go +++ b/pkg/ddl/backfilling_read_index.go @@ -56,8 +56,6 @@ type readIndexSummary struct { minKey []byte maxKey []byte totalSize uint64 - dataFiles []string - statFiles []string stats []external.MultipleFilesStat mu sync.Mutex } @@ -175,16 +173,18 @@ func (r *readIndexExecutor) OnFinished(ctx context.Context, subtask *proto.Subta } sum, _ := r.subtaskSummary.LoadAndDelete(subtask.ID) s := sum.(*readIndexSummary) - subtaskMeta.MinKey = s.minKey - subtaskMeta.MaxKey = s.maxKey + subtaskMeta.StartKey = s.minKey + subtaskMeta.EndKey = kv.Key(s.maxKey).Next() subtaskMeta.TotalKVSize = s.totalSize - subtaskMeta.DataFiles = s.dataFiles - subtaskMeta.StatFiles = s.statFiles subtaskMeta.MultipleFilesStats = s.stats + fileCnt := 0 + for _, stat := range s.stats { + fileCnt += len(stat.Filenames) + } logutil.Logger(ctx).Info("get key boundary on subtask finished", zap.String("min", hex.EncodeToString(s.minKey)), zap.String("max", hex.EncodeToString(s.maxKey)), - zap.Int("fileCount", len(s.dataFiles)), + zap.Int("fileCount", fileCnt), zap.Uint64("totalSize", s.totalSize)) meta, err := json.Marshal(subtaskMeta) if err != nil { @@ -268,12 +268,6 @@ func (r *readIndexExecutor) buildExternalStorePipeline( } s.totalSize += summary.TotalSize s.stats = append(s.stats, summary.MultipleFilesStats...)
- for _, f := range summary.MultipleFilesStats { - for _, filename := range f.Filenames { - s.dataFiles = append(s.dataFiles, filename[0]) - s.statFiles = append(s.statFiles, filename[1]) - } - } s.mu.Unlock() } counter := metrics.BackfillTotalCounter.WithLabelValues( diff --git a/pkg/ddl/backfilling_scheduler.go b/pkg/ddl/backfilling_scheduler.go index d70e0432879bd..4b0d6b12d6310 100644 --- a/pkg/ddl/backfilling_scheduler.go +++ b/pkg/ddl/backfilling_scheduler.go @@ -37,7 +37,6 @@ import ( "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" decoder "github.com/pingcap/tidb/pkg/util/rowDecoder" "go.uber.org/zap" ) @@ -159,14 +158,13 @@ func initSessCtx( sessCtx.GetSessionVars().StmtCtx.SetTimeZone(sessCtx.GetSessionVars().Location()) sessCtx.GetSessionVars().StmtCtx.BadNullAsWarning = !sqlMode.HasStrictMode() sessCtx.GetSessionVars().StmtCtx.OverflowAsWarning = !sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.AllowInvalidDate = sqlMode.HasAllowInvalidDatesMode() sessCtx.GetSessionVars().StmtCtx.DividedByZeroAsWarning = !sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.IgnoreZeroInDate = !sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode() - sessCtx.GetSessionVars().StmtCtx.NoZeroDate = sqlMode.HasStrictMode() - sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(types.StrictFlags. + + typeFlags := types.StrictFlags. WithTruncateAsWarning(!sqlMode.HasStrictMode()). - WithClipNegativeToZero(true), - ) + WithIgnoreInvalidDateErr(sqlMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!sqlMode.HasStrictMode() || sqlMode.HasAllowInvalidDatesMode()) + sessCtx.GetSessionVars().StmtCtx.SetTypeFlags(typeFlags) // Prevent initializing the mock context in the workers concurrently. // For details, see https://github.com/pingcap/tidb/issues/40879. @@ -176,7 +174,7 @@ func initSessCtx( func (*txnBackfillScheduler) expectedWorkerSize() (size int) { workerCnt := int(variable.GetDDLReorgWorkerCounter()) - return mathutil.Min(workerCnt, maxBackfillWorkerSize) + return min(workerCnt, maxBackfillWorkerSize) } func (b *txnBackfillScheduler) currentWorkerSize() int { @@ -219,6 +217,9 @@ func (b *txnBackfillScheduler) adjustWorkerSize() error { case typeUpdateColumnWorker: // Setting InCreateOrAlterStmt tells the difference between SELECT casting and ALTER COLUMN casting. sessCtx.GetSessionVars().StmtCtx.InCreateOrAlterStmt = true + sessCtx.GetSessionVars().StmtCtx.SetTypeFlags( + sessCtx.GetSessionVars().StmtCtx.TypeFlags(). 
+ WithIgnoreZeroDateErr(!reorgInfo.ReorgMeta.SQLMode.HasStrictMode())) updateWorker := newUpdateColumnWorker(sessCtx, i, b.tbl, b.decodeColMap, reorgInfo, jc) runner = newBackfillWorker(jc.ddlJobCtx, updateWorker) worker = updateWorker @@ -466,9 +467,9 @@ func (*ingestBackfillScheduler) expectedWorkerSize() (readerSize int, writerSize func expectedIngestWorkerCnt() (readerCnt, writerCnt int) { workerCnt := int(variable.GetDDLReorgWorkerCounter()) - readerCnt = mathutil.Min(workerCnt/2, maxBackfillWorkerSize) - readerCnt = mathutil.Max(readerCnt, 1) - writerCnt = mathutil.Min(workerCnt/2+2, maxBackfillWorkerSize) + readerCnt = min(workerCnt/2, maxBackfillWorkerSize) + readerCnt = max(readerCnt, 1) + writerCnt = min(workerCnt/2+2, maxBackfillWorkerSize) return readerCnt, writerCnt } diff --git a/pkg/ddl/column_modify_test.go b/pkg/ddl/column_modify_test.go index d09b7f9715203..d733698203625 100644 --- a/pkg/ddl/column_modify_test.go +++ b/pkg/ddl/column_modify_test.go @@ -133,7 +133,7 @@ AddLoop: func(_ kv.Handle, data []types.Datum, cols []*table.Column) (bool, error) { i++ // c4 must be -1 or > 0 - v, err := data[3].ToInt64(tk.Session().GetSessionVars().StmtCtx) + v, err := data[3].ToInt64(tk.Session().GetSessionVars().StmtCtx.TypeCtx()) require.NoError(t, err) if v == -1 { j++ diff --git a/pkg/ddl/constraint_test.go b/pkg/ddl/constraint_test.go index c1837bcd54d6b..06d3c8abce514 100644 --- a/pkg/ddl/constraint_test.go +++ b/pkg/ddl/constraint_test.go @@ -17,6 +17,7 @@ package ddl_test import ( "fmt" "testing" + "time" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/ddl/util/callback" @@ -243,6 +244,8 @@ func TestAlterAddConstraintStateChange3(t *testing.T) { callback.OnJobUpdatedExported.Store(&onJobUpdatedExportedFunc3) d.SetHook(callback) tk.MustExec("alter table t add constraint c3 check ( a > 10)") + // Issue TiDB#48123. 
+ time.Sleep(50 * time.Millisecond) tk.MustQuery("select * from t").Check(testkit.Rows("12")) tk.MustQuery("show create table t").Check(testkit.Rows("t CREATE TABLE `t` (\n `a` int(11) DEFAULT NULL,\nCONSTRAINT `c3` CHECK ((`a` > 10))\n) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) } diff --git a/pkg/ddl/ddl.go b/pkg/ddl/ddl.go index e8fc34f775dcb..ba9fc07043284 100644 --- a/pkg/ddl/ddl.go +++ b/pkg/ddl/ddl.go @@ -62,6 +62,7 @@ import ( pumpcli "github.com/pingcap/tidb/pkg/tidb-binlog/pump_client" tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/dbterror" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/gcutil" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/syncutil" @@ -697,17 +698,11 @@ func newDDL(ctx context.Context, options ...Option) *ddl { }, scheduler.WithSummary, ) - backFillDsp, err := NewBackfillingDispatcherExt(d) - if err != nil { - logutil.BgLogger().Warn("NewBackfillingDispatcherExt failed", zap.String("category", "ddl"), zap.Error(err)) - } else { - dispatcher.RegisterDispatcherFactory(proto.Backfill, - func(ctx context.Context, taskMgr dispatcher.TaskManager, serverID string, task *proto.Task) dispatcher.Dispatcher { - return newLitBackfillDispatcher(ctx, taskMgr, serverID, task, backFillDsp) - }) - dispatcher.RegisterDispatcherCleanUpFactory(proto.Backfill, newBackfillCleanUpS3) - } - + dispatcher.RegisterDispatcherFactory(proto.Backfill, + func(ctx context.Context, taskMgr dispatcher.TaskManager, serverID string, task *proto.Task) dispatcher.Dispatcher { + return newLitBackfillDispatcher(ctx, d, taskMgr, serverID, task) + }) + dispatcher.RegisterDispatcherCleanUpFactory(proto.Backfill, newBackfillCleanUpS3) // Register functions for enable/disable ddl when changing system variable `tidb_enable_ddl`. variable.EnableDDL = d.EnableDDL variable.DisableDDL = d.DisableDDL @@ -1124,7 +1119,7 @@ func (d *ddl) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { } // If the connection being killed, we need to CANCEL the DDL job. - if atomic.LoadUint32(&sessVars.Killed) == 1 { + if sessVars.SQLKiller.HandleSignal() == exeerrors.ErrQueryInterrupted { if atomic.LoadInt32(&sessVars.ConnectionStatus) == variable.ConnStatusShutdown { logutil.BgLogger().Info("DoDDLJob will quit because context done", zap.String("category", "ddl")) return context.Canceled @@ -1407,7 +1402,7 @@ func GetDDLInfoWithNewTxn(s sessionctx.Context) (*Info, error) { return info, err } -// GetDDLInfo returns DDL information. +// GetDDLInfo returns DDL information and only uses for testing. 
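This patch consistently migrates kill checks from the raw sessVars.Killed flag to the SQLKiller abstraction; SendKillSignal is the producer side (see the TiFlash test change later in this patch) and HandleSignal the consumer side. A side-by-side sketch of the two idioms:

// Old idiom, removed in this patch:
//   atomic.StoreUint32(&sessVars.Killed, 1)
//   if atomic.LoadUint32(&sessVars.Killed) == 1 { ... }
// New idiom, as used here:
sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted)
if err := sessVars.SQLKiller.HandleSignal(); err != nil {
	return err // exeerrors.ErrQueryInterrupted for a plain kill
}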
func GetDDLInfo(s sessionctx.Context) (*Info, error) { var err error info := &Info{} diff --git a/pkg/ddl/ddl_api.go b/pkg/ddl/ddl_api.go index ed37d96d4a219..745437f81a980 100644 --- a/pkg/ddl/ddl_api.go +++ b/pkg/ddl/ddl_api.go @@ -62,14 +62,15 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/dbterror" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/domainutil" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/mathutil" - "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tidb/pkg/util/set" "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/pkg/util/stringutil" "github.com/tikv/client-go/v2/oracle" kvutil "github.com/tikv/client-go/v2/util" @@ -313,14 +314,14 @@ func (d *ddl) getPendingTiFlashTableCount(sctx sessionctx.Context, originVersion func isSessionDone(sctx sessionctx.Context) (bool, uint32) { done := false - killed := atomic.LoadUint32(&sctx.GetSessionVars().Killed) - if killed == 1 { - done = true + killed := sctx.GetSessionVars().SQLKiller.HandleSignal() == exeerrors.ErrQueryInterrupted + if killed { + return true, 1 } failpoint.Inject("BatchAddTiFlashSendDone", func(val failpoint.Value) { done = val.(bool) }) - return done, killed + return done, 0 } func (d *ddl) waitPendingTableThreshold(sctx sessionctx.Context, schemaID int64, tableID int64, originVersion int64, pendingCount uint32, threshold uint32) (bool, int64, uint32, bool) { @@ -1042,7 +1043,7 @@ func convertTimestampDefaultValToUTC(ctx sessionctx.Context, defaultVal interfac } if vv, ok := defaultVal.(string); ok { if vv != types.ZeroDatetimeStr && !strings.EqualFold(vv, ast.CurrentTimestamp) { - t, err := types.ParseTime(ctx.GetSessionVars().StmtCtx, vv, col.GetType(), col.GetDecimal(), nil) + t, err := types.ParseTime(ctx.GetSessionVars().StmtCtx.TypeCtx(), vv, col.GetType(), col.GetDecimal(), nil) if err != nil { return defaultVal, errors.Trace(err) } @@ -1367,7 +1368,7 @@ func getDefaultValue(ctx sessionctx.Context, col *table.Column, option *ast.Colu val, err := getEnumDefaultValue(v, col) return val, false, err case mysql.TypeDuration, mysql.TypeDate: - if v, err = v.ConvertTo(ctx.GetSessionVars().StmtCtx, &col.FieldType); err != nil { + if v, err = v.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), &col.FieldType); err != nil { return "", false, errors.Trace(err) } case mysql.TypeBit: @@ -1379,7 +1380,7 @@ func getDefaultValue(ctx sessionctx.Context, col *table.Column, option *ast.Colu // For these types, convert it to standard format firstly. // like integer fields, convert it into integer string literals. like convert "1.25" into "1" and "2.8" into "3". // if raise a error, we will use original expression. 
We will handle it in check phase - if temp, err := v.ConvertTo(ctx.GetSessionVars().StmtCtx, &col.FieldType); err == nil { + if temp, err := v.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), &col.FieldType); err == nil { v = temp } } @@ -1759,16 +1760,20 @@ func checkColumnAttributes(colName string, tp *types.FieldType) error { return nil } -func checkDuplicateConstraint(namesMap map[string]bool, name string, foreign bool) error { +func checkDuplicateConstraint(namesMap map[string]bool, name string, constraintType ast.ConstraintType) error { if name == "" { return nil } nameLower := strings.ToLower(name) if namesMap[nameLower] { - if foreign { + switch constraintType { + case ast.ConstraintForeignKey: return dbterror.ErrFkDupName.GenWithStackByArgs(name) + case ast.ConstraintCheck: + return dbterror.ErrCheckConstraintDupName.GenWithStackByArgs(name) + default: + return dbterror.ErrDupKeyName.GenWithStackByArgs(name) } - return dbterror.ErrDupKeyName.GenWithStack("duplicate key name %s", name) } namesMap[nameLower] = true return nil @@ -1828,12 +1833,12 @@ func checkConstraintNames(tableName model.CIStr, constraints []*ast.Constraint) // Check not empty constraint name whether is duplicated. for _, constr := range constraints { if constr.Tp == ast.ConstraintForeignKey { - err := checkDuplicateConstraint(fkNames, constr.Name, true) + err := checkDuplicateConstraint(fkNames, constr.Name, constr.Tp) if err != nil { return errors.Trace(err) } } else { - err := checkDuplicateConstraint(constrNames, constr.Name, false) + err := checkDuplicateConstraint(constrNames, constr.Name, constr.Tp) if err != nil { return errors.Trace(err) } @@ -3046,7 +3051,8 @@ func checkPartitionByHash(ctx sessionctx.Context, tbInfo *model.TableInfo) error // checkPartitionByRange checks validity of a "BY RANGE" partition. func checkPartitionByRange(ctx sessionctx.Context, tbInfo *model.TableInfo) error { failpoint.Inject("CheckPartitionByRangeErr", func() { - panic(memory.PanicMemoryExceedWarnMsg) + ctx.GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryMemoryExceeded) + panic(ctx.GetSessionVars().SQLKiller.HandleSignal()) }) pi := tbInfo.Partition @@ -7246,11 +7252,11 @@ func (d *ddl) createIndex(ctx sessionctx.Context, ti ast.Ident, keyType ast.Inde if indexInfo := t.Meta().FindIndexByName(indexName.L); indexInfo != nil { if indexInfo.State != model.StatePublic { // NOTE: explicit error message. See issue #18363. 
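Together with the checkDuplicateConstraint rework above, the createIndex message change just below routes duplicate-name errors through GenWithStackByArgs so the text matches MySQL exactly. A usage sketch mirroring the unit test added later in this patch:

namesMap := make(map[string]bool)
// The first registration succeeds and records the lower-cased name.
_ = checkDuplicateConstraint(namesMap, "c1", ast.ConstraintCheck)
// A second registration yields the constraint-specific error:
// [ddl:3822]Duplicate check constraint name 'c1'.
err := checkDuplicateConstraint(namesMap, "c1", ast.ConstraintCheck)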
- err = dbterror.ErrDupKeyName.GenWithStack("index already exist %s; "+ + err = dbterror.ErrDupKeyName.GenWithStack("Duplicate key name '%s'; "+ "a background job is trying to add the same index, "+ "please check by `ADMIN SHOW DDL JOBS`", indexName) } else { - err = dbterror.ErrDupKeyName.GenWithStack("index already exist %s", indexName) + err = dbterror.ErrDupKeyName.GenWithStackByArgs(indexName) } if ifNotExists { ctx.GetSessionVars().StmtCtx.AppendNote(err) @@ -7832,7 +7838,7 @@ func checkAndGetColumnsTypeAndValuesMatch(ctx sessionctx.Context, colTypes []typ return nil, dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs() } } - newVal, err := val.ConvertTo(ctx.GetSessionVars().StmtCtx, &colType) + newVal, err := val.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), &colType) if err != nil { return nil, dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs() } @@ -8992,5 +8998,6 @@ func NewDDLReorgMeta(ctx sessionctx.Context) *model.DDLReorgMeta { WarningsCount: make(map[errors.ErrorID]int64), Location: &model.TimeZoneLocation{Name: tzName, Offset: tzOffset}, ResourceGroupName: ctx.GetSessionVars().ResourceGroupName, + Version: model.CurrentReorgMetaVersion, } } diff --git a/pkg/ddl/ddl_test.go b/pkg/ddl/ddl_test.go index 7b8df8620df02..a59c58833fc88 100644 --- a/pkg/ddl/ddl_test.go +++ b/pkg/ddl/ddl_test.go @@ -285,3 +285,25 @@ func TestError(t *testing.T) { require.Equal(t, uint16(err.Code()), code) } } + +func TestCheckDuplicateConstraint(t *testing.T) { + constrNames := map[string]bool{} + + // Foreign Key + err := checkDuplicateConstraint(constrNames, "f1", ast.ConstraintForeignKey) + require.NoError(t, err) + err = checkDuplicateConstraint(constrNames, "f1", ast.ConstraintForeignKey) + require.EqualError(t, err, "[ddl:1826]Duplicate foreign key constraint name 'f1'") + + // Check constraint + err = checkDuplicateConstraint(constrNames, "c1", ast.ConstraintCheck) + require.NoError(t, err) + err = checkDuplicateConstraint(constrNames, "c1", ast.ConstraintCheck) + require.EqualError(t, err, "[ddl:3822]Duplicate check constraint name 'c1'.") + + // Unique contraints etc + err = checkDuplicateConstraint(constrNames, "u1", ast.ConstraintUniq) + require.NoError(t, err) + err = checkDuplicateConstraint(constrNames, "u1", ast.ConstraintUniq) + require.EqualError(t, err, "[ddl:1061]Duplicate key name 'u1'") +} diff --git a/pkg/ddl/ddl_worker.go b/pkg/ddl/ddl_worker.go index 96d59251ff32b..12bfeef693ed2 100644 --- a/pkg/ddl/ddl_worker.go +++ b/pkg/ddl/ddl_worker.go @@ -537,16 +537,6 @@ func needUpdateRawArgs(job *model.Job, meetErr bool) bool { return true } -func (w *worker) deleteRange(ctx context.Context, job *model.Job) error { - var err error - if job.Version <= currentVersion { - err = w.delRangeManager.addDelRangeJob(ctx, job) - } else { - err = dbterror.ErrInvalidDDLJobVersion.GenWithStackByArgs(job.Version, currentVersion) - } - return errors.Trace(err) -} - func jobNeedGC(job *model.Job) bool { if !job.IsCancelled() { if job.Warning != nil && dbterror.ErrCantDropFieldOrKey.Equal(job.Warning) { @@ -587,7 +577,7 @@ func (w *worker) finishDDLJob(t *meta.Meta, job *model.Job) (err error) { }() if jobNeedGC(job) { - err = w.deleteRange(w.ctx, job) + err = w.delRangeManager.addDelRangeJob(w.ctx, job) if err != nil { return errors.Trace(err) } diff --git a/pkg/ddl/delete_range.go b/pkg/ddl/delete_range.go index 16de4f259b1d2..c06ebe4a62432 100644 --- a/pkg/ddl/delete_range.go +++ b/pkg/ddl/delete_range.go @@ -384,7 +384,10 @@ func insertJobIntoDeleteRangeTable(ctx context.Context, sctx 
sessionctx.Context,
 	startKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID)
 	endKey := tablecodec.EncodeTableIndexPrefix(tableID, indexID+1)
 	elemID := ea.allocForIndexID(tableID, indexID)
-	return doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID))
+	if err := doInsert(ctx, s, job.ID, elemID, startKey, endKey, now, fmt.Sprintf("index ID is %d", indexID)); err != nil {
+		return errors.Trace(err)
+	}
+	continue
 }
 failpoint.Inject("checkDropGlobalIndex", func(val failpoint.Value) {
 	if val.(bool) {
diff --git a/pkg/ddl/index.go b/pkg/ddl/index.go
index b0f888b53d90e..6cbe46415ba98 100644
--- a/pkg/ddl/index.go
+++ b/pkg/ddl/index.go
@@ -1088,9 +1088,7 @@ func runReorgJobAndHandleErr(w *worker, d *ddlCtx, t *meta.Meta, job *model.Job,
 		// TODO(tangenta): get duplicate column and match index.
 		err = convertToKeyExistsErr(err, allIndexInfos[0], tbl.Meta())
 	}
-	if !errorIsRetryable(err, job) ||
-		// TODO: Remove this check make it can be retry. Related test is TestModifyColumnReorgInfo.
-		job.ReorgMeta.IsDistReorg {
+	if !errorIsRetryable(err, job) {
 		logutil.BgLogger().Warn("run add index job failed, convert job to rollback", zap.String("category", "ddl"),
 			zap.String("job", job.String()), zap.Error(err))
 		ver, err = convertAddIdxJob2RollbackJob(d, t, job, tbl.Meta(), allIndexInfos, err)
 		if err1 := rh.RemoveDDLReorgHandle(job, reorgInfo.elements); err1 != nil {
@@ -2051,6 +2049,9 @@ var MockDMLExecutionOnTaskFinished func()
 
 // MockDMLExecutionOnDDLPaused is used to mock DML execution when ddl job paused.
 var MockDMLExecutionOnDDLPaused func()
 
+// TestSyncChan is used to sync the test.
+var TestSyncChan = make(chan struct{})
+
 func (w *worker) executeDistGlobalTask(reorgInfo *reorgInfo) error {
 	if reorgInfo.mergingTmpIdx {
 		return errors.New("do not support merge index")
@@ -2066,12 +2067,16 @@
 		taskKey = fmt.Sprintf("%s/%d", taskKey, mInfo.Seq)
 	}
 
-	// for resuming add index task.
+	// For resuming the add-index task.
+	// We need to fetch the global task by taskKey from both the tidb_global_task and tidb_global_task_history tables.
+	// While the related ddl job is paused, the global task with taskKey may have already succeeded and been moved to tidb_global_task_history.
+	// As a result, when resuming the related ddl job,
+	// it is necessary to check whether the task exists in the tidb_global_task and tidb_global_task_history tables.
 	taskManager, err := storage.GetTaskManager()
 	if err != nil {
 		return err
 	}
-	task, err := taskManager.GetGlobalTaskByKey(taskKey)
+	task, err := taskManager.GetGlobalTaskByKeyWithHistory(taskKey)
 	if err != nil {
 		return err
 	}
@@ -2079,6 +2084,10 @@
 	// It's possible that the task state is succeed but the ddl job is paused.
 	// When the task is in the succeed state, we can skip the dist-task execution/scheduling process.
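GetGlobalTaskByKeyWithHistory is the crux of the pause/resume fix: a task that finished while the job was paused lives only in tidb_global_task_history, so the old GetGlobalTaskByKey lookup would miss it and the worker would re-submit completed work. An illustrative sketch of the assumed lookup contract (the real query lives in the storage package):

func getTaskWithHistorySketch(stm *storage.TaskManager, key string) (*proto.Task, error) {
	// Assuming GetGlobalTaskByKey returns (nil, nil) when the key is absent.
	task, err := stm.GetGlobalTaskByKey(key)
	if err != nil || task != nil {
		return task, err
	}
	// Fall back to tidb_global_task_history for tasks that finished while
	// the DDL job was paused (the history query is omitted in this sketch).
	return nil, nil
}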
if task.State == proto.TaskStateSucceed { + logutil.BgLogger().Info( + "global task succeed, start to resume the ddl job", + zap.String("category", "ddl"), + zap.String("task-key", taskKey)) return nil } g.Go(func() error { @@ -2093,9 +2102,11 @@ func (w *worker) executeDistGlobalTask(reorgInfo *reorgInfo) error { return err } err = handle.WaitGlobalTask(ctx, task) - if w.isReorgPaused(reorgInfo.Job.ID) { - logutil.BgLogger().Warn("job paused by user", zap.String("category", "ddl"), zap.Error(err)) - return dbterror.ErrPausedDDLJob.GenWithStackByArgs(reorgInfo.Job.ID) + if err := w.isReorgRunnable(reorgInfo.Job.ID, true); err != nil { + if dbterror.ErrPausedDDLJob.Equal(err) { + logutil.BgLogger().Warn("job paused by user", zap.String("category", "ddl"), zap.Error(err)) + return dbterror.ErrPausedDDLJob.GenWithStackByArgs(reorgInfo.Job.ID) + } } return err }) @@ -2121,12 +2132,14 @@ func (w *worker) executeDistGlobalTask(reorgInfo *reorgInfo) error { g.Go(func() error { defer close(done) err := handle.SubmitAndRunGlobalTask(ctx, taskKey, taskType, distPhysicalTableConcurrency, metaData) - failpoint.Inject("pauseAfterDistTaskSuccess", func() { + failpoint.Inject("pauseAfterDistTaskFinished", func() { MockDMLExecutionOnTaskFinished() }) - if w.isReorgPaused(reorgInfo.Job.ID) { - logutil.BgLogger().Warn("job paused by user", zap.String("category", "ddl"), zap.Error(err)) - return dbterror.ErrPausedDDLJob.GenWithStackByArgs(reorgInfo.Job.ID) + if err := w.isReorgRunnable(reorgInfo.Job.ID, true); err != nil { + if dbterror.ErrPausedDDLJob.Equal(err) { + logutil.BgLogger().Warn("job paused by user", zap.String("category", "ddl"), zap.Error(err)) + return dbterror.ErrPausedDDLJob.GenWithStackByArgs(reorgInfo.Job.ID) + } } return err }) @@ -2149,13 +2162,17 @@ func (w *worker) executeDistGlobalTask(reorgInfo *reorgInfo) error { logutil.BgLogger().Error("pause global task error", zap.String("category", "ddl"), zap.String("task_key", taskKey), zap.Error(err)) continue } + failpoint.Inject("syncDDLTaskPause", func() { + // make sure the task is paused. + TestSyncChan <- struct{}{} + }) } if !dbterror.ErrCancelledDDLJob.Equal(err) { return errors.Trace(err) } if err = handle.CancelGlobalTask(taskKey); err != nil { logutil.BgLogger().Error("cancel global task error", zap.String("category", "ddl"), zap.String("task_key", taskKey), zap.Error(err)) - // continue to cancel global task + // continue to cancel global task. 
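The syncDDLTaskPause failpoint plus the exported TestSyncChan give tests a deterministic rendezvous: the worker blocks on the unbuffered channel only once the task is actually paused, and the test receives from it before asserting. A sketch from the test side (the failpoint path follows the package-path convention; the exact enable term may differ):

require.NoError(t, failpoint.Enable(
	"github.com/pingcap/tidb/pkg/ddl/syncDDLTaskPause", "return(true)"))
defer func() {
	require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/syncDDLTaskPause"))
}()
// ... pause the DDL job from another session ...
<-ddl.TestSyncChan // unblocks only after the dist task is really paused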
continue } } diff --git a/pkg/ddl/job_table.go b/pkg/ddl/job_table.go index 2e494a5a182f7..94125eefdbfc6 100644 --- a/pkg/ddl/job_table.go +++ b/pkg/ddl/job_table.go @@ -329,7 +329,7 @@ func (d *ddl) checkAndUpdateClusterState(needUpdate bool) error { ownerOp := owner.OpNone if stateInfo.State == syncer.StateUpgrading { - ownerOp = owner.OpGetUpgradingState + ownerOp = owner.OpSyncUpgradingState } err = d.ownerManager.SetOwnerOpValue(d.ctx, ownerOp) if err != nil { diff --git a/pkg/ddl/partition.go b/pkg/ddl/partition.go index ac141e49054ba..23be2ff7b71af 100644 --- a/pkg/ddl/partition.go +++ b/pkg/ddl/partition.go @@ -866,7 +866,7 @@ func getLowerBoundInt(partCols ...*model.ColumnInfo) int64 { if mysql.HasUnsignedFlag(col.FieldType.GetFlag()) { return 0 } - ret = mathutil.Min(ret, types.IntergerSignedLowerBound(col.GetType())) + ret = min(ret, types.IntergerSignedLowerBound(col.GetType())) } return ret } @@ -1097,7 +1097,7 @@ func GeneratePartDefsFromInterval(ctx sessionctx.Context, tp ast.AlterTableType, if err != nil { return err } - cmp, err := currVal.Compare(ctx.GetSessionVars().StmtCtx, &lastVal, collate.GetBinaryCollator()) + cmp, err := currVal.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &lastVal, collate.GetBinaryCollator()) if err != nil { return err } @@ -1427,7 +1427,7 @@ func checkPartitionValuesIsInt(ctx sessionctx.Context, defName interface{}, expr return dbterror.ErrValuesIsNotIntType.GenWithStackByArgs(defName) } - _, err = val.ConvertTo(ctx.GetSessionVars().StmtCtx, tp) + _, err = val.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), tp) if err != nil && !types.ErrOverflow.Equal(err) { return dbterror.ErrWrongTypeColumnValue.GenWithStackByArgs() } diff --git a/pkg/ddl/placement/bundle.go b/pkg/ddl/placement/bundle.go index 110a39f397ca3..e9331571c6d2c 100644 --- a/pkg/ddl/placement/bundle.go +++ b/pkg/ddl/placement/bundle.go @@ -67,8 +67,8 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle, leaderConst := options.LeaderConstraints learnerConstraints := options.LearnerConstraints followerConstraints := options.FollowerConstraints - followerCount := options.Followers - learnerCount := options.Learners + explicitFollowerCount := options.Followers + explicitLearnerCount := options.Learners rules := []*Rule{} commonConstraints, err := NewConstraintsFromYaml([]byte(constraints)) @@ -77,7 +77,10 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle, // The dictionary format specifies details for each replica. Constraints are used to define normal // replicas that should act as voters. // For example: CONSTRAINTS='{ "+region=us-east-1":2, "+region=us-east-2": 2, "+region=us-west-1": 1}' - normalReplicasRules, err := NewRulesWithDictConstraints(Voter, constraints) + normalReplicasRules, err := NewRuleBuilder(). + SetRole(Voter). + SetConstraintStr(constraints). 
+ BuildRulesWithDictConstraintsOnly() if err != nil { return nil, err } @@ -94,15 +97,15 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle, } } leaderReplicas, followerReplicas := uint64(1), uint64(2) - if followerCount > 0 { - followerReplicas = followerCount + if explicitFollowerCount > 0 { + followerReplicas = explicitFollowerCount } if !needCreateDefault { if len(leaderConstraints) == 0 { leaderReplicas = 0 } if len(followerConstraints) == 0 { - if followerCount > 0 { + if explicitFollowerCount > 0 { return nil, fmt.Errorf("%w: specify follower count without specify follower constraints when specify other constraints", ErrInvalidPlacementOptions) } followerReplicas = 0 @@ -119,7 +122,12 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle, // create follower rules. // if no constraints, we need create default follower rules. if followerReplicas > 0 { - followerRules, err := NewRules(Voter, followerReplicas, followerConstraints) + builder := NewRuleBuilder(). + SetRole(Voter). + SetReplicasNum(followerReplicas). + SetSkipCheckReplicasConsistent(needCreateDefault && (explicitFollowerCount == 0)). + SetConstraintStr(followerConstraints) + followerRules, err := builder.BuildRules() if err != nil { return nil, fmt.Errorf("%w: invalid FollowerConstraints", err) } @@ -134,7 +142,11 @@ func NewBundleFromConstraintsOptions(options *model.PlacementSettings) (*Bundle, } // create learner rules. - learnerRules, err := NewRules(Learner, learnerCount, learnerConstraints) + builder := NewRuleBuilder(). + SetRole(Learner). + SetReplicasNum(explicitLearnerCount). + SetConstraintStr(learnerConstraints) + learnerRules, err := builder.BuildRules() if err != nil { return nil, fmt.Errorf("%w: invalid LearnerConstraints", err) } diff --git a/pkg/ddl/placement/bundle_test.go b/pkg/ddl/placement/bundle_test.go index 960a37b9236ec..c1b7c067c774b 100644 --- a/pkg/ddl/placement/bundle_test.go +++ b/pkg/ddl/placement/bundle_test.go @@ -342,9 +342,9 @@ func TestString(t *testing.T) { ID: GroupID(1), } - rules1, err := NewRules(Voter, 3, `["+zone=sh", "+zone=sh"]`) + rules1, err := newRules(Voter, 3, `["+zone=sh", "+zone=sh"]`) require.NoError(t, err) - rules2, err := NewRules(Voter, 4, `["-zone=sh", "+zone=bj"]`) + rules2, err := newRules(Voter, 4, `["-zone=sh", "+zone=bj"]`) require.NoError(t, err) bundle.Rules = append(rules1, rules2...) 
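A usage sketch of the fluent RuleBuilder introduced in rule.go below. The dict counts (2+1) match SetReplicasNum(3), so BuildRules passes its consistency check; a mismatch would return ErrInvalidConstraintsReplicas unless SetSkipCheckReplicasConsistent(true) is set, which is the escape hatch the default-follower path above relies on:

rules, err := placement.NewRuleBuilder().
	SetRole(placement.Voter).
	SetReplicasNum(3).
	SetConstraintStr(`{"+region=us-east-1": 2, "+region=us-west-1": 1}`).
	BuildRules()
if err != nil {
	return nil, err
}
bundle.Rules = append(bundle.Rules, rules...)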
@@ -727,6 +727,26 @@ func TestNewBundleFromOptions(t *testing.T) { err: ErrInvalidConstraintsFormat, }) + tests = append(tests, TestCase{ + name: "direct syntax: follower dict constraints", + input: &model.PlacementSettings{ + FollowerConstraints: "{+disk=ssd: 1}", + }, + output: []*Rule{ + NewRule(Leader, 1, NewConstraintsDirect()), + NewRule(Voter, 1, NewConstraintsDirect(NewConstraintDirect("disk", In, "ssd"))), + }, + }) + + tests = append(tests, TestCase{ + name: "direct syntax: invalid follower dict constraints", + input: &model.PlacementSettings{ + FollowerConstraints: "{+disk=ssd: 1}", + Followers: 2, + }, + err: ErrInvalidConstraintsReplicas, + }) + tests = append(tests, TestCase{ name: "direct syntax: learner dict constraints", input: &model.PlacementSettings{ @@ -799,7 +819,7 @@ func TestResetBundleWithSingleRule(t *testing.T) { ID: GroupID(1), } - rules, err := NewRules(Voter, 3, `["+zone=sh", "+zone=sh"]`) + rules, err := newRules(Voter, 3, `["+zone=sh", "+zone=sh"]`) require.NoError(t, err) bundle.Rules = rules @@ -916,15 +936,15 @@ func TestTidy(t *testing.T) { ID: GroupID(1), } - rules0, err := NewRules(Voter, 1, `["+zone=sh", "+zone=sh"]`) + rules0, err := newRules(Voter, 1, `["+zone=sh", "+zone=sh"]`) require.NoError(t, err) require.Len(t, rules0, 1) rules0[0].Count = 0 // test prune useless rules - rules1, err := NewRules(Voter, 4, `["-zone=sh", "+zone=bj"]`) + rules1, err := newRules(Voter, 4, `["-zone=sh", "+zone=bj"]`) require.NoError(t, err) require.Len(t, rules1, 1) - rules2, err := NewRules(Voter, 0, `{"-zone=sh,+zone=bj": 4}}`) + rules2, err := newRules(Voter, 0, `{"-zone=sh,+zone=bj": 4}}`) require.NoError(t, err) bundle.Rules = append(bundle.Rules, rules0...) bundle.Rules = append(bundle.Rules, rules1...) @@ -943,11 +963,11 @@ func TestTidy(t *testing.T) { }, bundle.Rules[0].Constraints[2]) // merge - rules3, err := NewRules(Follower, 4, "") + rules3, err := newRules(Follower, 4, "") require.NoError(t, err) require.Len(t, rules3, 1) - rules4, err := NewRules(Follower, 5, "") + rules4, err := newRules(Follower, 5, "") require.NoError(t, err) require.Len(t, rules4, 1) diff --git a/pkg/ddl/placement/rule.go b/pkg/ddl/placement/rule.go index f21588b664682..c52839c37bb32 100644 --- a/pkg/ddl/placement/rule.go +++ b/pkg/ddl/placement/rule.go @@ -153,6 +153,70 @@ func (r *TiFlashRule) UnmarshalJSON(bytes []byte) error { return err } +// RuleBuilder is used to build the Rules from a constraint string. +type RuleBuilder struct { + role PeerRoleType + replicasNum uint64 + skipCheckReplicasConsistent bool + constraintStr string +} + +// NewRuleBuilder creates a new RuleBuilder. +func NewRuleBuilder() *RuleBuilder { + return &RuleBuilder{} +} + +// SetRole sets the role of the rule. +func (b *RuleBuilder) SetRole(role PeerRoleType) *RuleBuilder { + b.role = role + return b +} + +// SetReplicasNum sets the replicas number in the rule. +func (b *RuleBuilder) SetReplicasNum(num uint64) *RuleBuilder { + b.replicasNum = num + return b +} + +// SetSkipCheckReplicasConsistent sets the skipCheckReplicasConsistent flag. +func (b *RuleBuilder) SetSkipCheckReplicasConsistent(skip bool) *RuleBuilder { + b.skipCheckReplicasConsistent = skip + return b +} + +// SetConstraintStr sets the constraint string. +func (b *RuleBuilder) SetConstraintStr(constraintStr string) *RuleBuilder { + b.constraintStr = constraintStr + return b +} + +// BuildRulesWithDictConstraintsOnly constructs []*Rule from a yaml-compatible representation of +// 'dict' constraints. 
+func (b *RuleBuilder) BuildRulesWithDictConstraintsOnly() ([]*Rule, error) { + return newRulesWithDictConstraints(b.role, b.constraintStr) +} + +// BuildRules constructs []*Rule from a yaml-compatible representation of +// 'array' or 'dict' constraints. +// Refer to https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-24-placement-rules-in-sql.md. +func (b *RuleBuilder) BuildRules() ([]*Rule, error) { + rules, err := newRules(b.role, b.replicasNum, b.constraintStr) + // check if replicas is consistent + if err == nil { + if b.skipCheckReplicasConsistent { + return rules, err + } + totalCnt := 0 + for _, rule := range rules { + totalCnt += rule.Count + } + if b.replicasNum != 0 && b.replicasNum != uint64(totalCnt) { + err = fmt.Errorf("%w: count of replicas in dict constrains is %d, but got %d", ErrInvalidConstraintsReplicas, totalCnt, b.replicasNum) + } + } + return rules, err +} + // NewRule constructs *Rule from role, count, and constraints. It is here to // consistent the behavior of creating new rules. func NewRule(role PeerRoleType, replicas uint64, cnst Constraints) *Rule { @@ -175,10 +239,10 @@ func getYamlMapFormatError(str string) error { return nil } -// NewRules constructs []*Rule from a yaml-compatible representation of +// newRules constructs []*Rule from a yaml-compatible representation of // 'array' or 'dict' constraints. // Refer to https://github.com/pingcap/tidb/blob/master/docs/design/2020-06-24-placement-rules-in-sql.md. -func NewRules(role PeerRoleType, replicas uint64, cnstr string) (rules []*Rule, err error) { +func newRules(role PeerRoleType, replicas uint64, cnstr string) (rules []*Rule, err error) { cnstbytes := []byte(cnstr) constraints1, err1 := NewConstraintsFromYaml(cnstbytes) if err1 == nil { @@ -199,23 +263,12 @@ func NewRules(role PeerRoleType, replicas uint64, cnstr string) (rules []*Rule, return } - rules, err = NewRulesWithDictConstraints(role, cnstr) - // check if replicas is consistent - if err == nil { - totalCnt := 0 - for _, rule := range rules { - totalCnt += rule.Count - } - if replicas != 0 && replicas != uint64(totalCnt) { - err = fmt.Errorf("%w: count of replicas in dict constrains is %d, but got %d", ErrInvalidConstraintsReplicas, totalCnt, replicas) - } - } - return + return newRulesWithDictConstraints(role, cnstr) } -// NewRulesWithDictConstraints constructs []*Rule from a yaml-compatible representation of +// newRulesWithDictConstraints constructs []*Rule from a yaml-compatible representation of // 'dict' constraints. 
-func NewRulesWithDictConstraints(role PeerRoleType, cnstr string) ([]*Rule, error) { +func newRulesWithDictConstraints(role PeerRoleType, cnstr string) ([]*Rule, error) { rules := []*Rule{} cnstbytes := []byte(cnstr) constraints2 := map[string]int{} diff --git a/pkg/ddl/placement/rule_test.go b/pkg/ddl/placement/rule_test.go index ed1e032b31892..dd6eadf4d29c5 100644 --- a/pkg/ddl/placement/rule_test.go +++ b/pkg/ddl/placement/rule_test.go @@ -172,7 +172,7 @@ func TestNewRuleAndNewRules(t *testing.T) { for _, tt := range tests { comment := fmt.Sprintf("[%s]", tt.name) - output, err := NewRules(Voter, tt.replicas, tt.input) + output, err := newRules(Voter, tt.replicas, tt.input) if tt.err == nil { require.NoError(t, err, comment) matchRules(tt.output, output, comment, t) diff --git a/pkg/ddl/reorg.go b/pkg/ddl/reorg.go index 909f2d414a9ab..65089a81ddc1d 100644 --- a/pkg/ddl/reorg.go +++ b/pkg/ddl/reorg.go @@ -181,6 +181,7 @@ func (w *worker) runReorgJob(reorgInfo *reorgInfo, tblInfo *model.TableInfo, Warnings: make(map[errors.ErrorID]*terror.Error), WarningsCount: make(map[errors.ErrorID]int64), Location: &model.TimeZoneLocation{Name: time.UTC.String(), Offset: 0}, + Version: model.CurrentReorgMetaVersion, } } @@ -892,5 +893,15 @@ func CleanupDDLReorgHandles(job *model.Job, s *sess.Session) { // GetDDLReorgHandle gets the latest processed DDL reorganize position. func (r *reorgHandler) GetDDLReorgHandle(job *model.Job) (element *meta.Element, startKey, endKey kv.Key, physicalTableID int64, err error) { - return getDDLReorgHandle(r.s, job) + element, startKey, endKey, physicalTableID, err = getDDLReorgHandle(r.s, job) + if job.ReorgMeta != nil && job.ReorgMeta.Version == model.ReorgMetaVersion0 && err == nil { + logutil.BgLogger().Info("job get table range for old version ReorgMetas", zap.String("category", "ddl"), + zap.Int64("jobID", job.ID), zap.Int64("job ReorgMeta version", job.ReorgMeta.Version), zap.Int64("physical table ID", physicalTableID), + zap.String("startKey", hex.EncodeToString(startKey)), + zap.String("current endKey", hex.EncodeToString(endKey)), + zap.String("endKey next", hex.EncodeToString(endKey.Next()))) + endKey = endKey.Next() + } + + return } diff --git a/pkg/ddl/sequence.go b/pkg/ddl/sequence.go index 9772322d90f06..e60043be6efe1 100644 --- a/pkg/ddl/sequence.go +++ b/pkg/ddl/sequence.go @@ -132,7 +132,7 @@ func handleSequenceOptions(seqOptions []*ast.SequenceOption, sequenceInfo *model sequenceInfo.MaxValue = model.DefaultNegativeSequenceMaxValue } if !startSetFlag { - sequenceInfo.Start = mathutil.Min(sequenceInfo.MaxValue, model.DefaultNegativeSequenceStartValue) + sequenceInfo.Start = min(sequenceInfo.MaxValue, model.DefaultNegativeSequenceStartValue) } if !minSetFlag { sequenceInfo.MinValue = model.DefaultNegativeSequenceMinValue diff --git a/pkg/ddl/tests/tiflash/BUILD.bazel b/pkg/ddl/tests/tiflash/BUILD.bazel index c7855a35a4dfb..f0117788b0599 100644 --- a/pkg/ddl/tests/tiflash/BUILD.bazel +++ b/pkg/ddl/tests/tiflash/BUILD.bazel @@ -30,6 +30,7 @@ go_test( "//pkg/testkit/testsetup", "//pkg/util", "//pkg/util/logutil", + "//pkg/util/sqlkiller", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/metapb", "@com_github_stretchr_testify//require", diff --git a/pkg/ddl/tests/tiflash/ddl_tiflash_test.go b/pkg/ddl/tests/tiflash/ddl_tiflash_test.go index 7da565ffd680d..948b277a6f68c 100644 --- a/pkg/ddl/tests/tiflash/ddl_tiflash_test.go +++ b/pkg/ddl/tests/tiflash/ddl_tiflash_test.go @@ -23,7 +23,6 @@ import ( "fmt" "math" "sync" - 
"sync/atomic" "testing" "time" @@ -47,6 +46,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" "github.com/tikv/client-go/v2/testutils" @@ -920,7 +920,7 @@ func TestTiFlashBatchKill(t *testing.T) { wg.Run(func() { time.Sleep(time.Millisecond * 100) sessVars := tk.Session().GetSessionVars() - atomic.StoreUint32(&sessVars.Killed, 1) + sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) }) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/FastFailCheckTiFlashPendingTables", `return(2)`)) diff --git a/pkg/distsql/BUILD.bazel b/pkg/distsql/BUILD.bazel index e6acd81c960b6..64a0f10eb10e7 100644 --- a/pkg/distsql/BUILD.bazel +++ b/pkg/distsql/BUILD.bazel @@ -83,7 +83,6 @@ go_test( "//pkg/util/collate", "//pkg/util/disk", "//pkg/util/execdetails", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/mock", "//pkg/util/paging", diff --git a/pkg/distsql/distsql_test.go b/pkg/distsql/distsql_test.go index b49a99accd2f6..461b29285d11d 100644 --- a/pkg/distsql/distsql_test.go +++ b/pkg/distsql/distsql_test.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tipb/go-tipb" @@ -228,7 +227,7 @@ func (resp *mockResponse) Next(context.Context) (kv.ResultSubset, error) { if resp.count >= resp.total { return nil, nil } - numRows := mathutil.Min(resp.batch, resp.total-resp.count) + numRows := min(resp.batch, resp.total-resp.count) resp.count += numRows var chunks []tipb.Chunk @@ -245,7 +244,7 @@ func (resp *mockResponse) Next(context.Context) (kv.ResultSubset, error) { } else { chunks = make([]tipb.Chunk, 0) for numRows > 0 { - rows := mathutil.Min(numRows, 1024) + rows := min(numRows, 1024) numRows -= rows colTypes := make([]*types.FieldType, 4) diff --git a/pkg/distsql/select_result.go b/pkg/distsql/select_result.go index 6890f88a7ac2a..56f2228122bba 100644 --- a/pkg/distsql/select_result.go +++ b/pkg/distsql/select_result.go @@ -389,8 +389,8 @@ func (r *selectResult) fetchResp(ctx context.Context) error { return dbterror.ClassTiKV.Synthesize(terror.ErrCode(err.Code), err.Msg) } sessVars := r.ctx.GetSessionVars() - if atomic.LoadUint32(&sessVars.Killed) == 1 { - return errors.Trace(errQueryInterrupted) + if err = sessVars.SQLKiller.HandleSignal(); err != nil { + return err } sc := sessVars.StmtCtx for _, warning := range r.selectResp.Warnings { diff --git a/pkg/disttask/OWNERS b/pkg/disttask/OWNERS new file mode 100644 index 0000000000000..59e7c616f4389 --- /dev/null +++ b/pkg/disttask/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-disttask diff --git a/pkg/disttask/framework/dispatcher/dispatcher.go b/pkg/disttask/framework/dispatcher/dispatcher.go index 32f77b9da56b1..24f1b56f72a8c 100644 --- a/pkg/disttask/framework/dispatcher/dispatcher.go +++ b/pkg/disttask/framework/dispatcher/dispatcher.go @@ -459,9 +459,9 @@ func (d *BaseDispatcher) updateTask(taskState proto.TaskState, newSubTasks []*pr return err } -func (d *BaseDispatcher) onErrHandlingStage(receiveErr []error) error { +func (d *BaseDispatcher) 
onErrHandlingStage(receiveErrs []error) error { // 1. generate the needed task meta and subTask meta (dist-plan). - meta, err := d.OnErrStage(d.ctx, d, d.Task, receiveErr) + meta, err := d.OnErrStage(d.ctx, d, d.Task, receiveErrs) if err != nil { // OnErrStage must be retryable, if not, there will have resource leak for tasks. logutil.Logger(d.logCtx).Warn("handle error failed", zap.Error(err)) @@ -499,7 +499,7 @@ func (d *BaseDispatcher) onNextStage() (err error) { failpoint.Return(errors.New("mockDynamicDispatchErr")) }) - nextStep := d.GetNextStep(d, d.Task) + nextStep := d.GetNextStep(d.Task) logutil.Logger(d.logCtx).Info("onNextStage", zap.Int64("current-step", int64(d.Task.Step)), zap.Int64("next-step", int64(nextStep))) diff --git a/pkg/disttask/framework/dispatcher/dispatcher_manager.go b/pkg/disttask/framework/dispatcher/dispatcher_manager.go index 76e8af9f4eec9..96cc551874452 100644 --- a/pkg/disttask/framework/dispatcher/dispatcher_manager.go +++ b/pkg/disttask/framework/dispatcher/dispatcher_manager.go @@ -127,6 +127,9 @@ func NewManager(ctx context.Context, taskMgr TaskManager, serverID string) (*Man // Start the dispatcherManager, start the dispatchTaskLoop to start multiple dispatchers. func (dm *Manager) Start() { + failpoint.Inject("disableDispatcherManager", func() { + failpoint.Return() + }) dm.wg.Run(dm.dispatchTaskLoop) dm.wg.Run(dm.gcSubtaskHistoryTableLoop) dm.wg.Run(dm.cleanUpLoop) diff --git a/pkg/disttask/framework/dispatcher/dispatcher_test.go b/pkg/disttask/framework/dispatcher/dispatcher_test.go index 532c79d239261..e588c51d90339 100644 --- a/pkg/disttask/framework/dispatcher/dispatcher_test.go +++ b/pkg/disttask/framework/dispatcher/dispatcher_test.go @@ -69,7 +69,7 @@ func (*testDispatcherExt) IsRetryableErr(error) bool { return true } -func (*testDispatcherExt) GetNextStep(dispatcher.TaskHandle, *proto.Task) proto.Step { +func (*testDispatcherExt) GetNextStep(*proto.Task) proto.Step { return proto.StepDone } @@ -107,7 +107,7 @@ func (*numberExampleDispatcherExt) IsRetryableErr(error) bool { return true } -func (*numberExampleDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (*numberExampleDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne diff --git a/pkg/disttask/framework/dispatcher/interface.go b/pkg/disttask/framework/dispatcher/interface.go index 04d0b2cbc5117..6018cb453f34f 100644 --- a/pkg/disttask/framework/dispatcher/interface.go +++ b/pkg/disttask/framework/dispatcher/interface.go @@ -69,7 +69,7 @@ type Extension interface { // OnErrStage is called when: // 1. subtask is finished with error. // 2. task is cancelled after we have dispatched some subtasks. - OnErrStage(ctx context.Context, h TaskHandle, task *proto.Task, receiveErr []error) (subtaskMeta []byte, err error) + OnErrStage(ctx context.Context, h TaskHandle, task *proto.Task, receiveErrs []error) (subtaskMeta []byte, err error) // GetEligibleInstances is used to get the eligible instances for the task. // on certain condition we may want to use some instances to do the task, such as instances with more disk. @@ -81,7 +81,7 @@ type Extension interface { // GetNextStep is used to get the next step for the task. // if task runs successfully, it should go from StepInit to business steps, // then to StepDone, then dispatcher will mark it as finished. 
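Dropping the TaskHandle parameter makes GetNextStep a pure function of the task, which keeps step transitions deterministic and trivially testable. A minimal implementation under the new signature (myDispatcherExt is a stand-in, mirroring the test extensions updated throughout this patch):

func (*myDispatcherExt) GetNextStep(task *proto.Task) proto.Step {
	switch task.Step {
	case proto.StepInit:
		return proto.StepOne
	default:
		return proto.StepDone
	}
}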
- GetNextStep(h TaskHandle, task *proto.Task) proto.Step + GetNextStep(task *proto.Task) proto.Step } // dispatcherFactoryFn is used to create a dispatcher. diff --git a/pkg/disttask/framework/framework_dynamic_dispatch_test.go b/pkg/disttask/framework/framework_dynamic_dispatch_test.go index 362ce482e933e..b97a6ef4ee827 100644 --- a/pkg/disttask/framework/framework_dynamic_dispatch_test.go +++ b/pkg/disttask/framework/framework_dynamic_dispatch_test.go @@ -61,7 +61,7 @@ func (*testDynamicDispatcherExt) OnErrStage(_ context.Context, _ dispatcher.Task return nil, nil } -func (dsp *testDynamicDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (dsp *testDynamicDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne diff --git a/pkg/disttask/framework/framework_err_handling_test.go b/pkg/disttask/framework/framework_err_handling_test.go index 977b24964d1a6..0a2f8f3ee5f82 100644 --- a/pkg/disttask/framework/framework_err_handling_test.go +++ b/pkg/disttask/framework/framework_err_handling_test.go @@ -78,7 +78,7 @@ func (*planErrDispatcherExt) IsRetryableErr(error) bool { return true } -func (p *planErrDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (p *planErrDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne @@ -112,7 +112,7 @@ func (*planNotRetryableErrDispatcherExt) IsRetryableErr(error) bool { return false } -func (p *planNotRetryableErrDispatcherExt) GetNextStep(dispatcher.TaskHandle, *proto.Task) proto.Step { +func (p *planNotRetryableErrDispatcherExt) GetNextStep(*proto.Task) proto.Step { return proto.StepDone } diff --git a/pkg/disttask/framework/framework_ha_test.go b/pkg/disttask/framework/framework_ha_test.go index cda8a5246491d..b2055dd9dbbb4 100644 --- a/pkg/disttask/framework/framework_ha_test.go +++ b/pkg/disttask/framework/framework_ha_test.go @@ -66,7 +66,7 @@ func (dsp *haTestDispatcherExt) OnNextSubtasksBatch(_ context.Context, _ dispatc return nil, nil } -func (*haTestDispatcherExt) OnErrStage(ctx context.Context, h dispatcher.TaskHandle, gTask *proto.Task, receiveErr []error) (subtaskMeta []byte, err error) { +func (*haTestDispatcherExt) OnErrStage(ctx context.Context, h dispatcher.TaskHandle, gTask *proto.Task, receiveErrs []error) (subtaskMeta []byte, err error) { return nil, nil } @@ -78,7 +78,7 @@ func (*haTestDispatcherExt) IsRetryableErr(error) bool { return true } -func (dsp *haTestDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (dsp *haTestDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne diff --git a/pkg/disttask/framework/framework_rollback_test.go b/pkg/disttask/framework/framework_rollback_test.go index 45dc968020c0c..7a053d415cbfd 100644 --- a/pkg/disttask/framework/framework_rollback_test.go +++ b/pkg/disttask/framework/framework_rollback_test.go @@ -65,7 +65,7 @@ func (*rollbackDispatcherExt) IsRetryableErr(error) bool { return true } -func (dsp *rollbackDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (dsp *rollbackDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne diff --git a/pkg/disttask/framework/framework_test.go b/pkg/disttask/framework/framework_test.go index 5c62ef5eba99f..c59a9a4fcd570 100644 --- 
a/pkg/disttask/framework/framework_test.go +++ b/pkg/disttask/framework/framework_test.go @@ -66,7 +66,7 @@ func (*testDispatcherExt) OnErrStage(_ context.Context, _ dispatcher.TaskHandle, return nil, nil } -func (dsp *testDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (dsp *testDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: return proto.StepOne @@ -525,13 +525,16 @@ func TestFrameworkSetLabel(t *testing.T) { RegisterTaskMeta(t, ctrl, &m, &testDispatcherExt{}) distContext := testkit.NewDistExecutionContext(t, 3) tk := testkit.NewTestKit(t, distContext.Store) + // 1. all "" role. DispatchTaskAndCheckSuccess("😁", t, &m) + // 2. one "background" role. tk.MustExec("set global tidb_service_scope=background") tk.MustQuery("select @@global.tidb_service_scope").Check(testkit.Rows("background")) tk.MustQuery("select @@tidb_service_scope").Check(testkit.Rows("background")) DispatchTaskAndCheckSuccess("😊", t, &m) + // 3. 2 "background" role. tk.MustExec("update mysql.dist_framework_meta set role = \"background\" where host = \":4001\"") DispatchTaskAndCheckSuccess("😆", t, &m) diff --git a/pkg/disttask/framework/scheduler/BUILD.bazel b/pkg/disttask/framework/scheduler/BUILD.bazel index b72f51527a99e..b9db1e97f2546 100644 --- a/pkg/disttask/framework/scheduler/BUILD.bazel +++ b/pkg/disttask/framework/scheduler/BUILD.bazel @@ -12,6 +12,7 @@ go_library( visibility = ["//visibility:public"], deps = [ "//br/pkg/lightning/common", + "//br/pkg/lightning/log", "//pkg/config", "//pkg/disttask/framework/dispatcher", "//pkg/disttask/framework/handle", @@ -20,10 +21,15 @@ go_library( "//pkg/disttask/framework/storage", "//pkg/domain/infosync", "//pkg/metrics", + "//pkg/parser/terror", "//pkg/resourcemanager/pool/spool", "//pkg/resourcemanager/util", + "//pkg/util", "//pkg/util/backoff", + "//pkg/util/dbterror", + "//pkg/util/gctuner", "//pkg/util/logutil", + "//pkg/util/memory", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@org_uber_go_zap//:zap", @@ -48,7 +54,7 @@ go_test( "//pkg/disttask/framework/proto", "//pkg/resourcemanager/pool/spool", "//pkg/resourcemanager/util", - "@com_github_pkg_errors//:errors", + "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@org_uber_go_mock//gomock", ], diff --git a/pkg/disttask/framework/scheduler/manager.go b/pkg/disttask/framework/scheduler/manager.go index d917d2cdac826..759e70612ab25 100644 --- a/pkg/disttask/framework/scheduler/manager.go +++ b/pkg/disttask/framework/scheduler/manager.go @@ -22,11 +22,14 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/br/pkg/lightning/common" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/disttask/framework/proto" "github.com/pingcap/tidb/pkg/domain/infosync" + "github.com/pingcap/tidb/pkg/metrics" "github.com/pingcap/tidb/pkg/resourcemanager/pool/spool" "github.com/pingcap/tidb/pkg/resourcemanager/util" + tidbutil "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "go.uber.org/zap" ) @@ -34,9 +37,10 @@ import ( var ( schedulerPoolSize int32 = 4 // same as dispatcher - checkTime = 300 * time.Millisecond - retrySQLTimes = 3 - retrySQLInterval = 500 * time.Millisecond + checkTime = 300 * time.Millisecond + recoverMetaInterval = 90 * time.Second + retrySQLTimes = 30 + retrySQLInterval = 500 * time.Millisecond ) // ManagerBuilder is used to build a Manager. 
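Note the tuning above: retrySQLTimes grows from 3 to 30 with retrySQLInterval unchanged at 500ms, so meta operations now tolerate roughly 15 seconds of storage unavailability, and the new recoverMetaInterval (90s) drives the periodic re-registration loop added below. The retry shape used by initMeta, as a standalone sketch:

func withRetry(times int, interval time.Duration, f func() error) (err error) {
	for i := 0; i < times; i++ {
		if err = f(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err // last error after exhausting all retries
}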
@@ -70,7 +74,7 @@ type Manager struct { } // id, it's the same as server id now, i.e. host:port. id string - wg sync.WaitGroup + wg tidbutil.WaitGroupWrapper ctx context.Context cancel context.CancelFunc logCtx context.Context @@ -97,36 +101,33 @@ func (b *ManagerBuilder) BuildManager(ctx context.Context, id string, taskTable return m, nil } -// Start starts the Manager. -func (m *Manager) Start() error { - logutil.Logger(m.logCtx).Debug("manager start") - var err error +func (m *Manager) initMeta() (err error) { for i := 0; i < retrySQLTimes; i++ { err = m.taskTable.StartManager(m.id, config.GetGlobalConfig().Instance.TiDBServiceScope) if err == nil { break } if i%10 == 0 { - logutil.Logger(m.logCtx).Warn("start manager failed", zap.String("scope", config.GetGlobalConfig().Instance.TiDBServiceScope), - zap.Int("retry times", retrySQLTimes), zap.Error(err)) + logutil.Logger(m.logCtx).Warn("start manager failed", + zap.String("scope", config.GetGlobalConfig().Instance.TiDBServiceScope), + zap.Int("retry times", i), + zap.Error(err)) } time.Sleep(retrySQLInterval) } - if err != nil { + return err +} + +// Start starts the Manager. +func (m *Manager) Start() error { + logutil.Logger(m.logCtx).Debug("manager start") + if err := m.initMeta(); err != nil { return err } - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.fetchAndHandleRunnableTasks(m.ctx) - }() - - m.wg.Add(1) - go func() { - defer m.wg.Done() - m.fetchAndFastCancelTasks(m.ctx) - }() + m.wg.Run(m.fetchAndHandleRunnableTasksLoop) + m.wg.Run(m.fetchAndFastCancelTasksLoop) + m.wg.Run(m.recoverMetaLoop) return nil } @@ -138,12 +139,13 @@ func (m *Manager) Stop() { } // fetchAndHandleRunnableTasks fetches the runnable tasks from the global task table and handles them. -func (m *Manager) fetchAndHandleRunnableTasks(ctx context.Context) { +func (m *Manager) fetchAndHandleRunnableTasksLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "fetchAndHandleRunnableTasksLoop", m.fetchAndHandleRunnableTasksLoop, false) ticker := time.NewTicker(checkTime) for { select { - case <-ctx.Done(): - logutil.Logger(m.logCtx).Info("fetchAndHandleRunnableTasks done") + case <-m.ctx.Done(): + logutil.Logger(m.logCtx).Info("fetchAndHandleRunnableTasksLoop done") return case <-ticker.C: tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateRunning, proto.TaskStateReverting) @@ -151,19 +153,21 @@ func (m *Manager) fetchAndHandleRunnableTasks(ctx context.Context) { m.logErr(err) continue } - m.onRunnableTasks(ctx, tasks) + m.onRunnableTasks(m.ctx, tasks) } } } // fetchAndFastCancelTasks fetches the reverting/pausing tasks from the global task table and fast cancels them. -func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { +func (m *Manager) fetchAndFastCancelTasksLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "fetchAndFastCancelTasksLoop", m.fetchAndFastCancelTasksLoop, false) + ticker := time.NewTicker(checkTime) for { select { - case <-ctx.Done(): + case <-m.ctx.Done(): m.cancelAllRunningTasks() - logutil.Logger(m.logCtx).Info("fetchAndFastCancelTasks done") + logutil.Logger(m.logCtx).Info("fetchAndFastCancelTasksLoop done") return case <-ticker.C: tasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStateReverting) @@ -171,7 +175,7 @@ func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { m.logErr(err) continue } - m.onCanceledTasks(ctx, tasks) + m.onCanceledTasks(m.ctx, tasks) // cancel pending/running subtasks, and mark them as paused. 
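Each background loop above passes itself as tidbutil.Recover's recover callback, so a panic is logged and the loop restarts instead of silently dying with the goroutine. The pattern in isolation (someLoop is a stand-in; Recover's signature as used in this patch is label, funcInfo, recoverFn, quit):

func (m *Manager) someLoop() {
	defer tidbutil.Recover(metrics.LabelDomain, "someLoop", m.someLoop, false)
	ticker := time.NewTicker(checkTime)
	defer ticker.Stop()
	for {
		select {
		case <-m.ctx.Done():
			return
		case <-ticker.C:
			// poll and handle work here
		}
	}
}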
pausingTasks, err := m.taskTable.GetGlobalTasksInStates(proto.TaskStatePausing) @@ -189,6 +193,9 @@ func (m *Manager) fetchAndFastCancelTasks(ctx context.Context) { // onRunnableTasks handles runnable tasks. func (m *Manager) onRunnableTasks(ctx context.Context, tasks []*proto.Task) { + if len(tasks) == 0 { + return + } tasks = m.filterAlreadyHandlingTasks(tasks) for _, task := range tasks { exist, err := m.taskTable.HasSubtasksInStates(m.id, task.ID, task.Step, @@ -221,6 +228,9 @@ func (m *Manager) onRunnableTasks(ctx context.Context, tasks []*proto.Task) { // onCanceledTasks cancels the running subtasks. func (m *Manager) onCanceledTasks(_ context.Context, tasks []*proto.Task) { + if len(tasks) == 0 { + return + } m.mu.RLock() defer m.mu.RUnlock() for _, task := range tasks { @@ -234,6 +244,9 @@ func (m *Manager) onCanceledTasks(_ context.Context, tasks []*proto.Task) { // onPausingTasks pauses/cancels the pending/running subtasks. func (m *Manager) onPausingTasks(tasks []*proto.Task) error { + if len(tasks) == 0 { + return nil + } m.mu.RLock() defer m.mu.RUnlock() for _, task := range tasks { @@ -250,6 +263,28 @@ func (m *Manager) onPausingTasks(tasks []*proto.Task) error { return nil } +// recoverMetaLoop inits and recovers dist_framework_meta for the tidb node running the scheduler manager. +// This is necessary when the TiDB node experiences a prolonged network partition +// and the dispatcher deletes `dist_framework_meta`. +// When the TiDB node recovers from the network partition, +// we need to re-insert the metadata. +func (m *Manager) recoverMetaLoop() { + defer tidbutil.Recover(metrics.LabelDomain, "recoverMetaLoop", m.recoverMetaLoop, false) + ticker := time.NewTicker(recoverMetaInterval) + for { + select { + case <-m.ctx.Done(): + logutil.Logger(m.logCtx).Info("recoverMetaLoop done") + return + case <-ticker.C: + if err := m.initMeta(); err != nil { + m.logErr(err) + continue + } + } + } +} + // cancelAllRunningTasks cancels all running tasks. 
func (m *Manager) cancelAllRunningTasks() { m.mu.RLock() @@ -382,13 +417,18 @@ func (m *Manager) removeHandlingTask(id int64) { } func (m *Manager) logErr(err error) { - logutil.Logger(m.logCtx).Error("task manager error", zap.Error(err), zap.Stack("stack")) + logutil.Logger(m.logCtx).Error("task manager met error", zap.Error(err), zap.Stack("stack")) } func (m *Manager) logErrAndPersist(err error, taskID int64) { m.logErr(err) + // TODO: use interface if each business to retry + if common.IsRetryableError(err) || isRetryableError(err) { + return + } err1 := m.taskTable.UpdateErrorToSubtask(m.id, taskID, err) if err1 != nil { logutil.Logger(m.logCtx).Error("update to subtask failed", zap.Error(err1), zap.Stack("stack")) } + logutil.Logger(m.logCtx).Error("update error to subtask", zap.Int64("task-id", taskID), zap.Error(err1), zap.Stack("stack")) } diff --git a/pkg/disttask/framework/scheduler/scheduler.go b/pkg/disttask/framework/scheduler/scheduler.go index bcefa5c79b65a..2af66f498c26f 100644 --- a/pkg/disttask/framework/scheduler/scheduler.go +++ b/pkg/disttask/framework/scheduler/scheduler.go @@ -22,6 +22,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/common" + "github.com/pingcap/tidb/br/pkg/lightning/log" "github.com/pingcap/tidb/pkg/disttask/framework/dispatcher" "github.com/pingcap/tidb/pkg/disttask/framework/handle" "github.com/pingcap/tidb/pkg/disttask/framework/proto" @@ -29,8 +30,12 @@ import ( "github.com/pingcap/tidb/pkg/disttask/framework/storage" "github.com/pingcap/tidb/pkg/domain/infosync" "github.com/pingcap/tidb/pkg/metrics" + "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/util/backoff" + "github.com/pingcap/tidb/pkg/util/dbterror" + "github.com/pingcap/tidb/pkg/util/gctuner" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/memory" "go.uber.org/zap" ) @@ -137,7 +142,7 @@ func (s *BaseScheduler) Run(ctx context.Context, task *proto.Task) (err error) { return s.updateErrorToSubtask(ctx, task.ID, err) } -func (s *BaseScheduler) run(ctx context.Context, task *proto.Task) error { +func (s *BaseScheduler) run(ctx context.Context, task *proto.Task) (resErr error) { if ctx.Err() != nil { s.onError(ctx.Err()) return s.getError() @@ -146,7 +151,14 @@ func (s *BaseScheduler) run(ctx context.Context, task *proto.Task) error { defer runCancel(ErrFinishSubtask) s.registerCancelFunc(runCancel) s.resetError() - logutil.Logger(s.logCtx).Info("scheduler run a step", zap.Any("step", task.Step), zap.Any("concurrency", task.Concurrency)) + stepLogger := log.BeginTask(logutil.Logger(s.logCtx).With( + zap.Any("step", task.Step), + zap.Uint64("concurrency", task.Concurrency), + zap.Float64("mem-limit-percent", gctuner.GlobalMemoryLimitTuner.GetPercentage()), + zap.String("server-mem-limit", memory.ServerMemoryLimitOriginText.Load()), + ), "schedule step") + // log as info level, subtask might be cancelled, let caller check it. 
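Wrapping the step execution in lightning's log.BeginTask/End produces a paired begin/end log with elapsed time and the final error; info level is deliberate because a cancelled subtask is not necessarily a failure. The pairing in isolation (doStep is a stand-in):

stepLogger := log.BeginTask(logutil.Logger(ctx).With(zap.Any("step", task.Step)), "schedule step")
var resErr error
defer func() {
	// End re-logs "schedule step" with the duration and resErr.
	stepLogger.End(zap.InfoLevel, resErr)
}()
resErr = doStep(ctx, task)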
+ defer func() { + stepLogger.End(zap.InfoLevel, resErr) + }() summary, cleanup, err := runSummaryCollectLoop(ctx, task, s.taskTable) if err != nil { @@ -472,13 +484,13 @@ func (s *BaseScheduler) onError(err error) { return } err = errors.Trace(err) - logutil.Logger(s.logCtx).Error("onError", zap.Error(err)) + logutil.Logger(s.logCtx).Error("onError", zap.Error(err), zap.Stack("stack")) s.mu.Lock() defer s.mu.Unlock() if s.mu.err == nil { s.mu.err = err - logutil.Logger(s.logCtx).Error("scheduler error", zap.Error(err)) + logutil.Logger(s.logCtx).Error("scheduler met first error", zap.Error(err)) } if s.mu.runtimeCancel != nil { @@ -575,6 +587,18 @@ func (s *BaseScheduler) finishSubtaskAndUpdateState(ctx context.Context, subtask metrics.IncDistTaskSubTaskCnt(subtask) } +// TODO: abstract an interface for each business to implement. +func isRetryableError(err error) bool { + originErr := errors.Cause(err) + if tErr, ok := originErr.(*terror.Error); ok { + sqlErr := terror.ToSQLError(tErr) + _, ok := dbterror.ReorgRetryableErrCodes[sqlErr.Code] + return ok + } + // cannot retry unknown errors + return false +} + // markSubTaskCanceledOrFailed check the error type and decide the subtasks' state. // 1. Only cancel subtasks when meet ErrCancelSubtask. // 2. Only fail subtasks when meet non retryable error. @@ -584,7 +608,7 @@ func (s *BaseScheduler) markSubTaskCanceledOrFailed(ctx context.Context, subtask if ctx.Err() != nil && context.Cause(ctx) == ErrCancelSubtask { logutil.Logger(s.logCtx).Warn("subtask canceled", zap.Error(err)) s.updateSubtaskStateAndError(subtask, proto.TaskStateCanceled, nil) - } else if common.IsRetryableError(err) { + } else if common.IsRetryableError(err) || isRetryableError(err) { logutil.Logger(s.logCtx).Warn("met retryable error", zap.Error(err)) } else if errors.Cause(err) != context.Canceled { logutil.Logger(s.logCtx).Warn("subtask failed", zap.Error(err)) @@ -606,5 +630,8 @@ func (s *BaseScheduler) updateErrorToSubtask(ctx context.Context, taskID int64, return true, s.taskTable.UpdateErrorToSubtask(s.id, taskID, err) }, ) + if err1 == nil { + logger.Warn("update error to subtask success", zap.Error(err)) + } return err1 } diff --git a/pkg/disttask/framework/scheduler/scheduler_test.go b/pkg/disttask/framework/scheduler/scheduler_test.go index 0787cb3e3f0ce..7d81bd8de9d55 100644 --- a/pkg/disttask/framework/scheduler/scheduler_test.go +++ b/pkg/disttask/framework/scheduler/scheduler_test.go @@ -18,10 +18,10 @@ import ( "context" "testing" + "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/disttask/framework/mock" mockexecute "github.com/pingcap/tidb/pkg/disttask/framework/mock/execute" "github.com/pingcap/tidb/pkg/disttask/framework/proto" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" ) diff --git a/pkg/disttask/framework/storage/BUILD.bazel b/pkg/disttask/framework/storage/BUILD.bazel index 5a3ff2519b80e..913ece6c9ea6f 100644 --- a/pkg/disttask/framework/storage/BUILD.bazel +++ b/pkg/disttask/framework/storage/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//pkg/util/chunk", "//pkg/util/intest", "//pkg/util/logutil", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", diff --git a/pkg/disttask/framework/storage/table_test.go b/pkg/disttask/framework/storage/table_test.go index b711066211471..ae88f887660fd 100644 --- a/pkg/disttask/framework/storage/table_test.go +++ b/pkg/disttask/framework/storage/table_test.go @@ -417,6 +417,7 @@ func TestDistFrameworkMeta(t
*testing.T) { require.NoError(t, sm.StartManager(":4000", "background")) require.NoError(t, sm.StartManager(":4001", "")) + require.NoError(t, sm.StartManager(":4002", "")) require.NoError(t, sm.StartManager(":4002", "background")) allNodes, err := sm.GetAllNodes() diff --git a/pkg/disttask/framework/storage/task_table.go b/pkg/disttask/framework/storage/task_table.go index 66736d7e6f3bf..cb25dc1662f12 100644 --- a/pkg/disttask/framework/storage/task_table.go +++ b/pkg/disttask/framework/storage/task_table.go @@ -32,6 +32,7 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/tikv/client-go/v2/util" "go.uber.org/zap" @@ -549,8 +550,7 @@ func (stm *TaskManager) StartSubtask(subtaskID int64) error { // StartManager insert the manager information into dist_framework_meta. func (stm *TaskManager) StartManager(tidbID string, role string) error { - _, err := stm.executeSQLWithNewSession(stm.ctx, `insert into mysql.dist_framework_meta values(%?, %?, DEFAULT) - on duplicate key update role = %?`, tidbID, role, role) + _, err := stm.executeSQLWithNewSession(stm.ctx, `replace into mysql.dist_framework_meta values(%?, %?, DEFAULT)`, tidbID, role) return err } @@ -638,30 +638,30 @@ func (stm *TaskManager) UpdateFailedSchedulerIDs(taskID int64, replaceNodes map[ } sql := new(strings.Builder) - if err := sqlexec.FormatSQL(sql, "update mysql.tidb_background_subtask set state = %? ,exec_id = (case ", proto.TaskStatePending); err != nil { + if err := sqlescape.FormatSQL(sql, "update mysql.tidb_background_subtask set state = %? ,exec_id = (case ", proto.TaskStatePending); err != nil { return err } for k, v := range replaceNodes { - if err := sqlexec.FormatSQL(sql, "when exec_id = %? then %? ", k, v); err != nil { + if err := sqlescape.FormatSQL(sql, "when exec_id = %? then %? ", k, v); err != nil { return err } } - if err := sqlexec.FormatSQL(sql, " end) where task_key = %? and state != \"succeed\" and exec_id in (", taskID); err != nil { + if err := sqlescape.FormatSQL(sql, " end) where task_key = %? 
and state != \"succeed\" and exec_id in (", taskID); err != nil { return err } i := 0 for k := range replaceNodes { if i != 0 { - if err := sqlexec.FormatSQL(sql, ","); err != nil { + if err := sqlescape.FormatSQL(sql, ","); err != nil { return err } } - if err := sqlexec.FormatSQL(sql, "%?", k); err != nil { + if err := sqlescape.FormatSQL(sql, "%?", k); err != nil { return err } i++ } - if err := sqlexec.FormatSQL(sql, ")"); err != nil { + if err := sqlescape.FormatSQL(sql, ")"); err != nil { return err } @@ -676,7 +676,7 @@ func (stm *TaskManager) CleanUpMeta(nodes []string) error { } return stm.WithNewTxn(stm.ctx, func(se sessionctx.Context) error { deleteSQL := new(strings.Builder) - if err := sqlexec.FormatSQL(deleteSQL, "delete from mysql.dist_framework_meta where host in("); err != nil { + if err := sqlescape.FormatSQL(deleteSQL, "delete from mysql.dist_framework_meta where host in("); err != nil { return err } deleteElems := make([]string, 0, len(nodes)) @@ -755,17 +755,17 @@ func (stm *TaskManager) UpdateGlobalTaskAndAddSubTasks(gTask *proto.Task, subtas } sql := new(strings.Builder) - if err := sqlexec.FormatSQL(sql, "insert into mysql.tidb_background_subtask \n"+ + if err := sqlescape.FormatSQL(sql, "insert into mysql.tidb_background_subtask \n"+ "(step, task_key, exec_id, meta, state, type, checkpoint, summary) values "); err != nil { return err } for i, subtask := range subtasks { if i != 0 { - if err := sqlexec.FormatSQL(sql, ","); err != nil { + if err := sqlescape.FormatSQL(sql, ","); err != nil { return err } } - if err := sqlexec.FormatSQL(sql, "(%?, %?, %?, %?, %?, %?, %?, %?)", + if err := sqlescape.FormatSQL(sql, "(%?, %?, %?, %?, %?, %?, %?, %?)", subtask.Step, gTask.ID, subtask.SchedulerID, subtask.Meta, subtaskState, proto.Type2Int(subtask.Type), []byte{}, "{}"); err != nil { return err } @@ -953,7 +953,7 @@ func (stm *TaskManager) TransferTasks2History(tasks []*proto.Task) error { } return stm.WithNewTxn(stm.ctx, func(se sessionctx.Context) error { insertSQL := new(strings.Builder) - if err := sqlexec.FormatSQL(insertSQL, "replace into mysql.tidb_global_task_history"+ + if err := sqlescape.FormatSQL(insertSQL, "replace into mysql.tidb_global_task_history"+ "(id, task_key, type, dispatcher_id, state, start_time, state_update_time,"+ "meta, concurrency, step, error) values"); err != nil { return err @@ -961,11 +961,11 @@ func (stm *TaskManager) TransferTasks2History(tasks []*proto.Task) error { for i, task := range tasks { if i != 0 { - if err := sqlexec.FormatSQL(insertSQL, ","); err != nil { + if err := sqlescape.FormatSQL(insertSQL, ","); err != nil { return err } } - if err := sqlexec.FormatSQL(insertSQL, "(%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)", + if err := sqlescape.FormatSQL(insertSQL, "(%?, %?, %?, %?, %?, %?, %?, %?, %?, %?, %?)", task.ID, task.Key, task.Type, task.DispatcherID, task.State, task.StartTime, task.StateUpdateTime, task.Meta, task.Concurrency, task.Step, serializeErr(task.Error)); err != nil { @@ -979,7 +979,7 @@ func (stm *TaskManager) TransferTasks2History(tasks []*proto.Task) error { // delete taskIDs tasks deleteSQL := new(strings.Builder) - if err := sqlexec.FormatSQL(deleteSQL, "delete from mysql.tidb_global_task where id in("); err != nil { + if err := sqlescape.FormatSQL(deleteSQL, "delete from mysql.tidb_global_task where id in("); err != nil { return err } deleteElems := make([]string, 0, len(tasks)) diff --git a/pkg/disttask/importinto/BUILD.bazel b/pkg/disttask/importinto/BUILD.bazel index 88e9d12db7f35..0ddf6147ebaeb 100644 
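The storage hunks above mechanically swap `sqlexec.FormatSQL` for `sqlescape.FormatSQL` while keeping the `%?` placeholder style for building multi-value statements. A toy stand-in shows the general idea behind such helpers: each placeholder is expanded into the output buffer with escaping applied, so a value can never terminate the surrounding SQL literal (this `formatSQL` is illustrative only, handling just strings and ints, and is not the real `sqlescape` API):

```go
package main

import (
	"fmt"
	"strings"
)

// formatSQL expands each %? placeholder with a quoted, escaped argument.
func formatSQL(sb *strings.Builder, format string, args ...interface{}) error {
	for _, arg := range args {
		idx := strings.Index(format, "%?")
		if idx < 0 {
			return fmt.Errorf("too many arguments for format %q", format)
		}
		sb.WriteString(format[:idx])
		switch v := arg.(type) {
		case string:
			// Double any single quote so the value cannot break out
			// of the quoted literal.
			sb.WriteString("'" + strings.ReplaceAll(v, "'", "''") + "'")
		case int:
			fmt.Fprintf(sb, "%d", v)
		default:
			return fmt.Errorf("unsupported argument type %T", arg)
		}
		format = format[idx+2:]
	}
	sb.WriteString(format)
	return nil
}

func main() {
	// Build a batched insert the way the hunks above do: a fixed
	// prefix, then one escaped value tuple per row, comma-separated.
	sb := new(strings.Builder)
	_ = formatSQL(sb, "insert into t(host, role) values ")
	for i, host := range []string{":4000", ":4001"} {
		if i != 0 {
			_ = formatSQL(sb, ",")
		}
		_ = formatSQL(sb, "(%?, %?)", host, "background")
	}
	fmt.Println(sb.String())
}
```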
--- a/pkg/disttask/importinto/BUILD.bazel +++ b/pkg/disttask/importinto/BUILD.bazel @@ -92,7 +92,7 @@ go_test( embed = [":importinto"], flaky = True, race = "on", - shard_count = 14, + shard_count = 15, deps = [ "//br/pkg/lightning/backend", "//br/pkg/lightning/backend/external", @@ -116,6 +116,7 @@ go_test( "//pkg/util/logutil", "//pkg/util/mock", "//pkg/util/sqlexec", + "@com_github_docker_go_units//:go-units", "@com_github_ngaut_pools//:pools", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/disttask/importinto/dispatcher.go b/pkg/disttask/importinto/dispatcher.go index 3a08e3ded3166..b141f1e46c730 100644 --- a/pkg/disttask/importinto/dispatcher.go +++ b/pkg/disttask/importinto/dispatcher.go @@ -381,7 +381,7 @@ func (*ImportDispatcherExt) IsRetryableErr(error) bool { } // GetNextStep implements dispatcher.Extension interface. -func (dsp *ImportDispatcherExt) GetNextStep(_ dispatcher.TaskHandle, task *proto.Task) proto.Step { +func (dsp *ImportDispatcherExt) GetNextStep(task *proto.Task) proto.Step { switch task.Step { case proto.StepInit: if dsp.GlobalSort { diff --git a/pkg/disttask/importinto/dispatcher_test.go b/pkg/disttask/importinto/dispatcher_test.go index d378799cd5cf5..1a1cb2c8b9a35 100644 --- a/pkg/disttask/importinto/dispatcher_test.go +++ b/pkg/disttask/importinto/dispatcher_test.go @@ -146,7 +146,7 @@ func (s *importIntoSuite) TestGetNextStep() { } ext := &ImportDispatcherExt{} for _, nextStep := range []proto.Step{StepImport, StepPostProcess, proto.StepDone} { - s.Equal(nextStep, ext.GetNextStep(nil, task)) + s.Equal(nextStep, ext.GetNextStep(task)) task.Step = nextStep } @@ -154,7 +154,7 @@ func (s *importIntoSuite) TestGetNextStep() { ext = &ImportDispatcherExt{GlobalSort: true} for _, nextStep := range []proto.Step{StepEncodeAndSort, StepMergeSort, StepWriteAndIngest, StepPostProcess, proto.StepDone} { - s.Equal(nextStep, ext.GetNextStep(nil, task)) + s.Equal(nextStep, ext.GetNextStep(task)) task.Step = nextStep } } diff --git a/pkg/disttask/importinto/dispatcher_testkit_test.go b/pkg/disttask/importinto/dispatcher_testkit_test.go index 2bd7c9b138d3e..e9106c7c4fb00 100644 --- a/pkg/disttask/importinto/dispatcher_testkit_test.go +++ b/pkg/disttask/importinto/dispatcher_testkit_test.go @@ -91,10 +91,10 @@ func TestDispatcherExtLocalSort(t *testing.T) { // to import stage, job should be running d := dsp.MockDispatcher(task) ext := importinto.ImportDispatcherExt{} - subtaskMetas, err := ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(d, task)) + subtaskMetas, err := ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 1) - task.Step = ext.GetNextStep(d, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepImport, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) @@ -112,20 +112,20 @@ func TestDispatcherExtLocalSort(t *testing.T) { require.NoError(t, manager.FinishSubtask(s.ID, []byte("{}"))) } // to post-process stage, job should be running and in validating step - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(d, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 1) - task.Step = ext.GetNextStep(d, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepPostProcess, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) 
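With the unused `dispatcher.TaskHandle` parameter dropped, `GetNextStep` becomes a pure function of the task's current step, which is exactly why the tests can now drive a task through its whole lifecycle with repeated `ext.GetNextStep(task)` calls. A minimal sketch of such a step state machine (the step names are illustrative):

```go
package main

import "fmt"

type Step int

const (
	StepInit Step = iota
	StepImport
	StepPostProcess
	StepDone
)

// getNextStep depends only on the current step, mirroring the
// simplified GetNextStep(task) signature.
func getNextStep(cur Step) Step {
	switch cur {
	case StepInit:
		return StepImport
	case StepImport:
		return StepPostProcess
	default:
		return StepDone
	}
}

func main() {
	// Advance until StepDone, the way the dispatcher tests do.
	for step := StepInit; step != StepDone; {
		step = getNextStep(step)
		fmt.Println("advanced to", step)
	}
}
```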
require.NoError(t, err) require.Equal(t, "running", gotJobInfo.Status) require.Equal(t, "validating", gotJobInfo.Step) // on next stage, job should be finished - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(d, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 0) - task.Step = ext.GetNextStep(d, task) + task.Step = ext.GetNextStep(task) require.Equal(t, proto.StepDone, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) @@ -150,6 +150,12 @@ func TestDispatcherExtLocalSort(t *testing.T) { } func TestDispatcherExtGlobalSort(t *testing.T) { + // Domain starts the dispatcher manager automatically; we need to disable it as + // we test import task management in this case. + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/disableDispatcherManager", "return(true)")) + t.Cleanup(func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/disableDispatcherManager")) + }) store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) pool := pools.NewResourcePool(func() (pools.Resource, error) { @@ -213,10 +219,10 @@ func TestDispatcherExtGlobalSort(t *testing.T) { ext := importinto.ImportDispatcherExt{ GlobalSort: true, } - subtaskMetas, err := ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(nil, task)) + subtaskMetas, err := ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 2) - task.Step = ext.GetNextStep(nil, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepEncodeAndSort, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) @@ -233,11 +239,9 @@ func TestDispatcherExtGlobalSort(t *testing.T) { require.NoError(t, err) sortStepMeta := &importinto.ImportStepMeta{ SortedDataMeta: &external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -248,11 +252,9 @@ }, SortedIndexMetas: map[int64]*external.SortedKVMeta{ 1: { - MinKey: []byte("ia"), - MaxKey: []byte("ic"), + StartKey: []byte("ia"), + EndKey: []byte("ic"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/index/1"}, - StatFiles: []string{"gs://sort-bucket/index/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -274,10 +276,10 @@ t.Cleanup(func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/importinto/forceMergeSort")) }) - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(nil, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 1) - task.Step = ext.GetNextStep(nil, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepMergeSort, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) @@ -295,19 +297,11 @@ mergeSortStepMeta := &importinto.MergeSortStepMeta{ KVGroup: "data", SortedKVMeta:
external.SortedKVMeta{ - MinKey: []byte("ta"), - MaxKey: []byte("tc"), + StartKey: []byte("ta"), + EndKey: []byte("tc"), TotalKVSize: 12, - DataFiles: []string{"gs://sort-bucket/data/1"}, - StatFiles: []string{"gs://sort-bucket/data/1.stat"}, - MultipleFilesStats: []external.MultipleFilesStat{ - { - Filenames: [][2]string{ - {"gs://sort-bucket/data/1", "gs://sort-bucket/data/1.stat"}, - }, - }, - }, }, + DataFiles: []string{"gs://sort-bucket/data/1"}, } mergeSortStepMetaBytes, err := json.Marshal(mergeSortStepMeta) require.NoError(t, err) @@ -320,30 +314,30 @@ t.Cleanup(func() { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/importinto/mockWriteIngestSpecs")) }) - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(nil, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 2) - task.Step = ext.GetNextStep(nil, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepWriteAndIngest, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) require.Equal(t, "running", gotJobInfo.Status) require.Equal(t, "importing", gotJobInfo.Step) // on next stage, to post-process stage - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(nil, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 1) - task.Step = ext.GetNextStep(nil, task) + task.Step = ext.GetNextStep(task) require.Equal(t, importinto.StepPostProcess, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) require.Equal(t, "running", gotJobInfo.Status) require.Equal(t, "validating", gotJobInfo.Step) // next stage, done - subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(nil, task)) + subtaskMetas, err = ext.OnNextSubtasksBatch(ctx, d, task, ext.GetNextStep(task)) require.NoError(t, err) require.Len(t, subtaskMetas, 0) - task.Step = ext.GetNextStep(nil, task) + task.Step = ext.GetNextStep(task) require.Equal(t, proto.StepDone, task.Step) gotJobInfo, err = importer.GetJob(ctx, conn, jobID, "root", true) require.NoError(t, err) diff --git a/pkg/disttask/importinto/encode_and_sort_operator.go b/pkg/disttask/importinto/encode_and_sort_operator.go index 2a74a45821920..651dfd0ac04a6 100644 --- a/pkg/disttask/importinto/encode_and_sort_operator.go +++ b/pkg/disttask/importinto/encode_and_sort_operator.go @@ -20,6 +20,7 @@ import ( "strconv" "time" + "github.com/docker/go-units" "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/lightning/backend/external" @@ -43,6 +44,9 @@ const ( // Note: this size is the memory taken by KV, not the size of taken by golang, // each KV has additional 24*2 bytes overhead for golang slice. indexKVTotalBufSize = size.GB - external.DefaultMemSizeLimit + // we use a larger block size for the data KV group to support larger rows. + // TODO: make it configurable? + dataKVGroupBlockSize = 32 * units.MiB ) // encodeAndSortOperator is an operator that encodes and sorts data. @@ -158,7 +162,9 @@ func newChunkWorker(ctx context.Context, op *encodeAndSortOperator, indexMemoryS builder := external.NewWriterBuilder(). SetOnCloseFunc(func(summary *external.WriterSummary) { op.sharedVars.mergeIndexSummary(indexID, summary) - }).SetMemorySizeLimit(indexMemorySizeLimit) + }).
+ SetMemorySizeLimit(indexMemorySizeLimit). + SetBlockSize(getKVGroupBlockSize("")) prefix := subtaskPrefix(op.taskID, op.subtaskID) // writer id for index: index/{indexID}/{workerID} writerID := path.Join("index", strconv.Itoa(int(indexID)), workerUUID) @@ -168,7 +174,8 @@ func newChunkWorker(ctx context.Context, op *encodeAndSortOperator, indexMemoryS // sorted data kv storage path: /{taskID}/{subtaskID}/data/{workerID} builder := external.NewWriterBuilder(). - SetOnCloseFunc(op.sharedVars.mergeDataSummary) + SetOnCloseFunc(op.sharedVars.mergeDataSummary). + SetBlockSize(getKVGroupBlockSize(dataKVGroup)) prefix := subtaskPrefix(op.taskID, op.subtaskID) // writer id for data: data/{workerID} writerID := path.Join("data", workerUUID) @@ -250,3 +257,10 @@ func getNumOfIndexGenKV(tblInfo *model.TableInfo) int { } return count } + +func getKVGroupBlockSize(group string) int { + if group == dataKVGroup { + return dataKVGroupBlockSize + } + return external.DefaultBlockSize +} diff --git a/pkg/disttask/importinto/encode_and_sort_operator_test.go b/pkg/disttask/importinto/encode_and_sort_operator_test.go index 89b30de5ab48e..6a737c5d25de5 100644 --- a/pkg/disttask/importinto/encode_and_sort_operator_test.go +++ b/pkg/disttask/importinto/encode_and_sort_operator_test.go @@ -23,6 +23,7 @@ import ( "testing" "time" + "github.com/docker/go-units" "github.com/pingcap/errors" "github.com/pingcap/tidb/br/pkg/lightning/backend" "github.com/pingcap/tidb/br/pkg/lightning/backend/external" @@ -201,3 +202,9 @@ func TestGetWriterMemorySizeLimit(t *testing.T) { }), c.createSQL) } } + +func TestGetKVGroupBlockSize(t *testing.T) { + require.Equal(t, 32*units.MiB, getKVGroupBlockSize(dataKVGroup)) + require.Equal(t, 16*units.MiB, getKVGroupBlockSize("")) + require.Equal(t, 16*units.MiB, getKVGroupBlockSize("1")) +} diff --git a/pkg/disttask/importinto/planner.go b/pkg/disttask/importinto/planner.go index 292ef218b9a07..6ea23054b4766 100644 --- a/pkg/disttask/importinto/planner.go +++ b/pkg/disttask/importinto/planner.go @@ -301,13 +301,14 @@ func generateMergeSortSpecs(planCtx planner.PlanCtx) ([]planner.PipelineSpec, er return nil, err } for kvGroup, kvMeta := range kvMetas { - length := len(kvMeta.DataFiles) if skipMergeSort(kvGroup, kvMeta.MultipleFilesStats) { logutil.Logger(planCtx.Ctx).Info("skip merge sort for kv group", zap.Int64("task-id", planCtx.TaskID), zap.String("kv-group", kvGroup)) continue } + dataFiles := kvMeta.GetDataFiles() + length := len(dataFiles) for start := 0; start < length; start += step { end := start + step if end > length { @@ -316,7 +317,7 @@ func generateMergeSortSpecs(planCtx planner.PlanCtx) ([]planner.PipelineSpec, er result = append(result, &MergeSortSpec{ MergeSortStepMeta: &MergeSortStepMeta{ KVGroup: kvGroup, - DataFiles: kvMeta.DataFiles[start:end], + DataFiles: dataFiles[start:end], }, }) } @@ -368,7 +369,7 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne logutil.Logger(ctx).Warn("close range splitter failed", zap.Error(err2)) } }() - startKey := tidbkv.Key(kvMeta.MinKey) + startKey := tidbkv.Key(kvMeta.StartKey) var endKey tidbkv.Key for { endKeyOfGroup, dataFiles, statFiles, rangeSplitKeys, err2 := splitter.SplitOneRangesGroup() @@ -376,13 +377,14 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne return err2 } if len(endKeyOfGroup) == 0 { - endKey = tidbkv.Key(kvMeta.MaxKey).Next() + endKey = kvMeta.EndKey } else { endKey = tidbkv.Key(endKeyOfGroup).Clone() } logutil.Logger(ctx).Info("kv range as 
subtask", zap.String("startKey", hex.EncodeToString(startKey)), - zap.String("endKey", hex.EncodeToString(endKey))) + zap.String("endKey", hex.EncodeToString(endKey)), + zap.Int("dataFiles", len(dataFiles))) if startKey.Cmp(endKey) >= 0 { return errors.Errorf("invalid kv range, startKey: %s, endKey: %s", hex.EncodeToString(startKey), hex.EncodeToString(endKey)) @@ -391,13 +393,13 @@ func generateWriteIngestSpecs(planCtx planner.PlanCtx, p *LogicalPlan) ([]planne m := &WriteIngestStepMeta{ KVGroup: kvGroup, SortedKVMeta: external.SortedKVMeta{ - MinKey: startKey, - MaxKey: endKey, - DataFiles: dataFiles, - StatFiles: statFiles, + StartKey: startKey, + EndKey: endKey, // this is actually an estimate, we don't know the exact size of the data TotalKVSize: uint64(config.DefaultBatchSize), }, + DataFiles: dataFiles, + StatFiles: statFiles, RangeSplitKeys: rangeSplitKeys, RangeSplitSize: splitter.GetRangeSplitSize(), } @@ -499,8 +501,14 @@ func getRangeSplitter(ctx context.Context, store storage.ExternalStorage, kvMeta zap.Int64("region-split-keys", regionSplitKeys)) return external.NewRangeSplitter( - ctx, kvMeta.DataFiles, kvMeta.StatFiles, store, - int64(config.DefaultBatchSize), int64(math.MaxInt64), - regionSplitSize, regionSplitKeys, + ctx, + kvMeta.GetDataFiles(), + kvMeta.GetStatFiles(), + store, + int64(config.DefaultBatchSize), + int64(math.MaxInt64), + regionSplitSize, + regionSplitKeys, + false, ) } diff --git a/pkg/disttask/importinto/planner_test.go b/pkg/disttask/importinto/planner_test.go index 95ccf9607a3fd..a3bb95e0a8b05 100644 --- a/pkg/disttask/importinto/planner_test.go +++ b/pkg/disttask/importinto/planner_test.go @@ -127,11 +127,9 @@ func genEncodeStepMetas(t *testing.T, cnt int) [][]byte { idxPrefix := fmt.Sprintf("i1_%d_", i) meta := &ImportStepMeta{ SortedDataMeta: &external.SortedKVMeta{ - MinKey: []byte(prefix + "a"), - MaxKey: []byte(prefix + "c"), + StartKey: []byte(prefix + "a"), + EndKey: []byte(prefix + "c"), TotalKVSize: 12, - DataFiles: []string{prefix + "/1"}, - StatFiles: []string{prefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -142,11 +140,9 @@ func genEncodeStepMetas(t *testing.T, cnt int) [][]byte { }, SortedIndexMetas: map[int64]*external.SortedKVMeta{ 1: { - MinKey: []byte(idxPrefix + "a"), - MaxKey: []byte(idxPrefix + "c"), + StartKey: []byte(idxPrefix + "a"), + EndKey: []byte(idxPrefix + "c"), TotalKVSize: 12, - DataFiles: []string{idxPrefix + "/1"}, - StatFiles: []string{idxPrefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -202,11 +198,9 @@ func genMergeStepMetas(t *testing.T, cnt int) [][]byte { meta := &MergeSortStepMeta{ KVGroup: "data", SortedKVMeta: external.SortedKVMeta{ - MinKey: []byte(prefix + "a"), - MaxKey: []byte(prefix + "c"), + StartKey: []byte(prefix + "a"), + EndKey: []byte(prefix + "c"), TotalKVSize: 12, - DataFiles: []string{prefix + "/1"}, - StatFiles: []string{prefix + "/1.stat"}, MultipleFilesStats: []external.MultipleFilesStat{ { Filenames: [][2]string{ @@ -231,17 +225,17 @@ func TestGetSortedKVMetas(t *testing.T) { require.Contains(t, kvMetas, "data") require.Contains(t, kvMetas, "1") // just check meta is merged, won't check all fields - require.Equal(t, []byte("d_0_a"), kvMetas["data"].MinKey) - require.Equal(t, []byte("d_2_c"), kvMetas["data"].MaxKey) - require.Equal(t, []byte("i1_0_a"), kvMetas["1"].MinKey) - require.Equal(t, []byte("i1_2_c"), kvMetas["1"].MaxKey) + require.Equal(t, []byte("d_0_a"), kvMetas["data"].StartKey) 
+ require.Equal(t, []byte("d_2_c"), kvMetas["data"].EndKey) + require.Equal(t, []byte("i1_0_a"), kvMetas["1"].StartKey) + require.Equal(t, []byte("i1_2_c"), kvMetas["1"].EndKey) mergeStepMetas := genMergeStepMetas(t, 3) kvMetas2, err := getSortedKVMetasOfMergeStep(mergeStepMetas) require.NoError(t, err) require.Len(t, kvMetas2, 1) - require.Equal(t, []byte("x_0_a"), kvMetas2["data"].MinKey) - require.Equal(t, []byte("x_2_c"), kvMetas2["data"].MaxKey) + require.Equal(t, []byte("x_0_a"), kvMetas2["data"].StartKey) + require.Equal(t, []byte("x_2_c"), kvMetas2["data"].EndKey) // force merge sort for data kv require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/importinto/forceMergeSort", `return("data")`)) @@ -256,8 +250,8 @@ func TestGetSortedKVMetas(t *testing.T) { }) require.NoError(t, err) require.Len(t, allKVMetas, 2) - require.Equal(t, []byte("x_0_a"), allKVMetas["data"].MinKey) - require.Equal(t, []byte("x_2_c"), allKVMetas["data"].MaxKey) - require.Equal(t, []byte("i1_0_a"), allKVMetas["1"].MinKey) - require.Equal(t, []byte("i1_2_c"), allKVMetas["1"].MaxKey) + require.Equal(t, []byte("x_0_a"), allKVMetas["data"].StartKey) + require.Equal(t, []byte("x_2_c"), allKVMetas["data"].EndKey) + require.Equal(t, []byte("i1_0_a"), allKVMetas["1"].StartKey) + require.Equal(t, []byte("i1_2_c"), allKVMetas["1"].EndKey) } diff --git a/pkg/disttask/importinto/proto.go b/pkg/disttask/importinto/proto.go index 1db7f6cfae703..ba5c13439cef0 100644 --- a/pkg/disttask/importinto/proto.go +++ b/pkg/disttask/importinto/proto.go @@ -109,6 +109,8 @@ type MergeSortStepMeta struct { type WriteIngestStepMeta struct { KVGroup string `json:"kv-group"` external.SortedKVMeta `json:"sorted-kv-meta"` + DataFiles []string `json:"data-files"` + StatFiles []string `json:"stat-files"` RangeSplitKeys [][]byte `json:"range-split-keys"` RangeSplitSize int64 `json:"range-split-size"` diff --git a/pkg/disttask/importinto/scheduler.go b/pkg/disttask/importinto/scheduler.go index dd219c6f09764..6645e597d50a4 100644 --- a/pkg/disttask/importinto/scheduler.go +++ b/pkg/disttask/importinto/scheduler.go @@ -21,7 +21,6 @@ import ( "time" "github.com/docker/go-units" - "github.com/google/uuid" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/backend" @@ -304,21 +303,11 @@ func (m *mergeSortStepExecutor) RunSubtask(ctx context.Context, subtask *proto.S m.subtaskSortedKVMeta.MergeSummary(summary) } - writerID := uuid.New().String() prefix := subtaskPrefix(m.taskID, subtask.ID) - return external.MergeOverlappingFiles( - ctx, - sm.DataFiles, - m.controller.GlobalSortStore, - 64*1024, - prefix, - writerID, - 256*size.MB, - 8*1024, - 1*size.MB, - 8*1024, - onClose) + return external.MergeOverlappingFiles(ctx, sm.DataFiles, m.controller.GlobalSortStore, 64*1024, + prefix, getKVGroupBlockSize(sm.KVGroup), 8*1024, 1*size.MB, 8*1024, + onClose, int(m.taskMeta.Plan.ThreadCnt), false) } func (m *mergeSortStepExecutor) OnFinished(_ context.Context, subtask *proto.Subtask) error { @@ -375,12 +364,13 @@ func (e *writeAndIngestStepExecutor) RunSubtask(ctx context.Context, subtask *pr StorageURI: e.taskMeta.Plan.CloudStorageURI, DataFiles: sm.DataFiles, StatFiles: sm.StatFiles, - MinKey: sm.MinKey, - MaxKey: sm.MaxKey, + StartKey: sm.StartKey, + EndKey: sm.EndKey, SplitKeys: sm.RangeSplitKeys, RegionSplitSize: sm.RangeSplitSize, TotalFileSize: int64(sm.TotalKVSize), TotalKVCount: 0, + CheckHotspot: false, }, }, engineUUID) if err != nil { diff --git a/pkg/domain/BUILD.bazel 
b/pkg/domain/BUILD.bazel index 61dc017eaa180..fe9b2a28e7486 100644 --- a/pkg/domain/BUILD.bazel +++ b/pkg/domain/BUILD.bazel @@ -80,6 +80,7 @@ go_library( "//pkg/util/replayer", "//pkg/util/servermemorylimit", "//pkg/util/sqlexec", + "//pkg/util/sqlkiller", "//pkg/util/syncutil", "@com_github_burntsushi_toml//:toml", "@com_github_ngaut_pools//:pools", @@ -141,7 +142,6 @@ go_test( "//pkg/parser/terror", "//pkg/server", "//pkg/session", - "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/store/mockstore", "//pkg/testkit", diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go index 72e174fb0a77b..1e2607a1da238 100644 --- a/pkg/domain/domain.go +++ b/pkg/domain/domain.go @@ -84,6 +84,7 @@ import ( "github.com/pingcap/tidb/pkg/util/replayer" "github.com/pingcap/tidb/pkg/util/servermemorylimit" "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/pkg/util/syncutil" "github.com/tikv/client-go/v2/tikv" "github.com/tikv/client-go/v2/txnkv/transaction" @@ -1480,7 +1481,7 @@ func (do *Domain) InitDistTaskLoop(ctx context.Context) error { func (do *Domain) distTaskFrameworkLoop(ctx context.Context, taskManager *storage.TaskManager, schedulerManager *scheduler.Manager, serverID string) { err := schedulerManager.Start() if err != nil { - logutil.BgLogger().Error("dist task scheduler manager failed", zap.Error(err)) + logutil.BgLogger().Error("dist task scheduler manager start failed", zap.Error(err)) return } logutil.BgLogger().Info("dist task scheduler manager started") @@ -2959,7 +2960,7 @@ func (s *SysProcesses) Track(id uint64, proc sessionctx.Context) error { } s.procMap[id] = proc proc.GetSessionVars().ConnectionID = id - atomic.StoreUint32(&proc.GetSessionVars().Killed, 0) + proc.GetSessionVars().SQLKiller.Reset() return nil } @@ -2970,7 +2971,7 @@ func (s *SysProcesses) UnTrack(id uint64) { if proc, ok := s.procMap[id]; ok { delete(s.procMap, id) proc.GetSessionVars().ConnectionID = 0 - atomic.StoreUint32(&proc.GetSessionVars().Killed, 0) + proc.GetSessionVars().SQLKiller.Reset() } } @@ -2993,6 +2994,6 @@ func (s *SysProcesses) KillSysProcess(id uint64) { s.mu.Lock() defer s.mu.Unlock() if proc, ok := s.procMap[id]; ok { - atomic.StoreUint32(&proc.GetSessionVars().Killed, 1) + proc.GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) } } diff --git a/pkg/domain/domain_test.go b/pkg/domain/domain_test.go index 00e365015998a..861b6b71b465e 100644 --- a/pkg/domain/domain_test.go +++ b/pkg/domain/domain_test.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/types" @@ -196,7 +195,7 @@ func TestStatWorkRecoverFromPanic(t *testing.T) { require.Equal(t, expiredTimeStamp, ts) // set expiredTimeStamp4PC to "2023-08-02 12:15:00" - ts, _ = types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2023-08-02 12:15:00") + ts, _ = types.ParseTimestamp(types.DefaultStmtNoWarningContext, "2023-08-02 12:15:00") dom.SetExpiredTimeStamp4PC(ts) expiredTimeStamp = dom.ExpiredTimeStamp4PC() require.Equal(t, expiredTimeStamp, ts) diff --git a/pkg/domain/infosync/tiflash_manager.go b/pkg/domain/infosync/tiflash_manager.go index 23c98b15df6d7..73500f9eea65c 100644 --- a/pkg/domain/infosync/tiflash_manager.go +++ 
b/pkg/domain/infosync/tiflash_manager.go @@ -498,9 +498,11 @@ func (m *TiFlashReplicaManagerCtx) GetRegionCountFromPD(ctx context.Context, tab endKey := tablecodec.EncodeTablePrefix(tableID + 1) startKey, endKey = m.codec.EncodeRegionRange(startKey, endKey) - p := fmt.Sprintf("/pd/api/v1/stats/region?start_key=%s&end_key=%s&count", - url.QueryEscape(string(startKey)), - url.QueryEscape(string(endKey))) + p := fmt.Sprintf("%s&count", + pdapi.RegionStatsByStartEndKey( + url.QueryEscape(string(startKey)), + url.QueryEscape(string(endKey)), + )) res, err := doRequest(ctx, "GetPDRegionStats", m.etcdCli.Endpoints(), p, "GET", nil) if err != nil { return errors.Trace(err) diff --git a/pkg/errno/errcode.go b/pkg/errno/errcode.go index 3f2bde189f854..0a20b0be530f9 100644 --- a/pkg/errno/errcode.go +++ b/pkg/errno/errcode.go @@ -1079,6 +1079,8 @@ const ( ErrLoadDataLocalUnsupportedOption = 8172 ErrLoadDataPreCheckFailed = 8173 ErrBRJobNotFound = 8174 + ErrMemoryExceedForQuery = 8175 + ErrMemoryExceedForInstance = 8176 // Error codes used by TiDB ddl package ErrUnsupportedDDLOperation = 8200 diff --git a/pkg/errno/errname.go b/pkg/errno/errname.go index 719f23d253868..631afe3d773b7 100644 --- a/pkg/errno/errname.go +++ b/pkg/errno/errname.go @@ -979,7 +979,7 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrInfoSchemaExpired: mysql.Message("Information schema is out of date: schema failed to update in 1 lease, please make sure TiDB can connect to TiKV", nil), ErrInfoSchemaChanged: mysql.Message("Information schema is changed during the execution of the statement(for example, table definition may be updated by other DDL ran in parallel). If you see this error often, try increasing `tidb_max_delta_schema_count`", nil), ErrBadNumber: mysql.Message("Bad Number", nil), - ErrCastAsSignedOverflow: mysql.Message("Cast to signed converted positive out-of-range integer to it's negative complement", nil), + ErrCastAsSignedOverflow: mysql.Message("Cast to signed converted positive out-of-range integer to its negative complement", nil), ErrCastNegIntAsUnsigned: mysql.Message("Cast to unsigned converted negative integer to it's positive complement", nil), ErrInvalidYearFormat: mysql.Message("invalid year format", nil), ErrInvalidYear: mysql.Message("invalid year", nil), @@ -1071,6 +1071,8 @@ var MySQLErrName = map[uint16]*mysql.ErrMessage{ ErrLoadDataInvalidOperation: mysql.Message("The current job status cannot perform the operation. %s", nil), ErrLoadDataLocalUnsupportedOption: mysql.Message("Unsupported option for LOAD DATA LOCAL INFILE: %s", nil), ErrLoadDataPreCheckFailed: mysql.Message("PreCheck failed: %s", nil), + ErrMemoryExceedForQuery: mysql.Message("Your query has been cancelled due to exceeding the allowed memory limit for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again.[conn=%d]", nil), + ErrMemoryExceedForInstance: mysql.Message("Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. 
Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.[conn=%d]", nil), ErrWarnOptimizerHintInvalidInteger: mysql.Message("integer value is out of range in '%s'", nil), ErrWarnOptimizerHintUnsupportedHint: mysql.Message("Optimizer hint %s is not supported by TiDB and is ignored", nil), diff --git a/pkg/executor/BUILD.bazel b/pkg/executor/BUILD.bazel index ec14ab06e530f..d269c7b11dcd1 100644 --- a/pkg/executor/BUILD.bazel +++ b/pkg/executor/BUILD.bazel @@ -220,7 +220,9 @@ go_library( "//pkg/util/servermemorylimit", "//pkg/util/set", "//pkg/util/size", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", + "//pkg/util/sqlkiller", "//pkg/util/stmtsummary", "//pkg/util/stmtsummary/v2:stmtsummary", "//pkg/util/stringutil", @@ -320,7 +322,6 @@ go_test( "infoschema_reader_internal_test.go", "infoschema_reader_test.go", "insert_test.go", - "inspection_common_test.go", "inspection_result_test.go", "inspection_summary_test.go", "join_pkg_test.go", @@ -345,13 +346,11 @@ go_test( "show_stats_test.go", "show_test.go", "shuffle_test.go", - "simple_test.go", "slow_query_sql_test.go", "slow_query_test.go", "sort_test.go", "split_test.go", "stale_txn_test.go", - "statement_context_test.go", "stmtsummary_test.go", "table_readers_required_rows_test.go", "temporary_table_test.go", @@ -433,17 +432,16 @@ go_test( "//pkg/util/disk", "//pkg/util/execdetails", "//pkg/util/gcutil", - "//pkg/util/globalconn", "//pkg/util/hack", "//pkg/util/logutil", "//pkg/util/memory", "//pkg/util/mock", "//pkg/util/paging", "//pkg/util/pdapi", - "//pkg/util/plancodec", "//pkg/util/ranger", "//pkg/util/sem", "//pkg/util/set", + "//pkg/util/sqlkiller", "//pkg/util/stmtsummary/v2:stmtsummary", "//pkg/util/stringutil", "//pkg/util/syncutil", diff --git a/pkg/executor/adapter.go b/pkg/executor/adapter.go index 94e6914518008..aa12b8e8ef085 100644 --- a/pkg/executor/adapter.go +++ b/pkg/executor/adapter.go @@ -53,13 +53,13 @@ import ( "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/sessiontxn/staleread" "github.com/pingcap/tidb/pkg/types" + util2 "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/breakpoint" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/hint" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/pingcap/tidb/pkg/util/replayer" "github.com/pingcap/tidb/pkg/util/sqlexec" @@ -147,7 +147,7 @@ func (a *recordSet) Next(ctx context.Context, req *chunk.Chunk) (err error) { if r == nil { return } - err = errors.Errorf("%v", r) + err = util2.GetRecoverError(r) logutil.Logger(ctx).Error("execute sql panic", zap.String("sql", a.stmt.GetTextToLog(false)), zap.Stack("stack")) }() @@ -462,10 +462,14 @@ func (a *ExecStmt) Exec(ctx context.Context) (_ sqlexec.RecordSet, err error) { } return } - if str, ok := r.(string); !ok || !strings.Contains(str, memory.PanicMemoryExceedWarnMsg) { + if recoverdErr, ok := r.(error); !ok || !(exeerrors.ErrMemoryExceedForQuery.Equal(recoverdErr) || + exeerrors.ErrMemoryExceedForInstance.Equal(recoverdErr) || + exeerrors.ErrQueryInterrupted.Equal(recoverdErr) || + exeerrors.ErrMaxExecTimeExceeded.Equal(recoverdErr)) { panic(r) + } else { + err = recoverdErr } - err = errors.Errorf("%v", r) logutil.Logger(ctx).Error("execute sql panic", zap.String("sql", a.GetTextToLog(false)), zap.Stack("stack")) }() @@ -1208,7 
+1212,7 @@ func (a *ExecStmt) buildExecutor() (exec.Executor, error) { func (a *ExecStmt) openExecutor(ctx context.Context, e exec.Executor) (err error) { defer func() { if r := recover(); r != nil { - err = errors.New(fmt.Sprint(r)) + err = util2.GetRecoverError(r) } }() start := time.Now() diff --git a/pkg/executor/adapter_test.go b/pkg/executor/adapter_test.go index 1956fa4bda6d6..bc5d08453aa8e 100644 --- a/pkg/executor/adapter_test.go +++ b/pkg/executor/adapter_test.go @@ -16,32 +16,12 @@ package executor_test import ( "testing" - "time" "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/sessionctx/variable" - "github.com/pingcap/tidb/pkg/testkit" "github.com/stretchr/testify/require" ) -func TestQueryTime(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - costTime := time.Since(tk.Session().GetSessionVars().StartTime) - require.Less(t, costTime, time.Second) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1), (1), (1), (1), (1)") - tk.MustExec("select * from t t1 join t t2 on t1.a = t2.a") - - costTime = time.Since(tk.Session().GetSessionVars().StartTime) - require.Less(t, costTime, time.Second) -} - func TestFormatSQL(t *testing.T) { val := executor.FormatSQL("aaaa") require.Equal(t, "aaaa", val.String()) diff --git a/pkg/executor/aggfuncs/BUILD.bazel b/pkg/executor/aggfuncs/BUILD.bazel index 527a02b2fcf5f..9336517d7ecec 100644 --- a/pkg/executor/aggfuncs/BUILD.bazel +++ b/pkg/executor/aggfuncs/BUILD.bazel @@ -97,7 +97,6 @@ go_test( "//pkg/parser/mysql", "//pkg/planner/util", "//pkg/sessionctx", - "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testsetup", diff --git a/pkg/executor/aggfuncs/aggfunc_test.go b/pkg/executor/aggfuncs/aggfunc_test.go index 9b6c1ad5acb74..7b24739078281 100644 --- a/pkg/executor/aggfuncs/aggfunc_test.go +++ b/pkg/executor/aggfuncs/aggfunc_test.go @@ -301,7 +301,7 @@ func testMergePartialResult(t *testing.T, p aggTest) { if p.funcName == ast.AggFuncJsonArrayagg { dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) } - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[0]) @@ -328,7 +328,7 @@ func testMergePartialResult(t *testing.T, p aggTest) { if p.funcName == ast.AggFuncJsonArrayagg { dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) } - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[1]) _, err = finalFunc.MergePartialResult(ctx, partialResult, finalPr) @@ -351,7 +351,7 @@ func testMergePartialResult(t *testing.T, p aggTest) { if p.funcName == ast.AggFuncJsonArrayagg { dt = resultChk.GetRow(0).GetDatum(0, types.NewFieldType(mysql.TypeJSON)) } - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[2], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[2], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[2]) } @@ -410,7 +410,7 @@ func testMultiArgsMergePartialResult(t *testing.T, ctx sessionctx.Context, p mul err = 
partialFunc.AppendFinalResult2Chunk(ctx, partialResult, resultChk) require.NoError(t, err) dt := resultChk.GetRow(0).GetDatum(0, p.retType) - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Zero(t, result) @@ -431,7 +431,7 @@ func testMultiArgsMergePartialResult(t *testing.T, ctx sessionctx.Context, p mul err = partialFunc.AppendFinalResult2Chunk(ctx, partialResult, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, p.retType) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Zero(t, result) _, err = finalFunc.MergePartialResult(ctx, partialResult, finalPr) @@ -442,7 +442,7 @@ func testMultiArgsMergePartialResult(t *testing.T, ctx sessionctx.Context, p mul require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, p.retType) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[2], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[2], ctor) require.NoError(t, err) require.Zero(t, result) } @@ -540,7 +540,7 @@ func testAggFunc(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[1]) @@ -550,7 +550,7 @@ func testAggFunc(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[0]) @@ -583,7 +583,7 @@ func testAggFunc(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[1]) @@ -593,7 +593,7 @@ func testAggFunc(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Equalf(t, 0, result, "%v != %v", dt.String(), p.results[0]) } @@ -630,7 +630,7 @@ func testAggFuncWithoutDistinct(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Zerof(t, result, 
"%v != %v", dt.String(), p.results[1]) @@ -640,7 +640,7 @@ func testAggFuncWithoutDistinct(t *testing.T, p aggTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Zerof(t, result, "%v != %v", dt.String(), p.results[0]) } @@ -708,7 +708,7 @@ func testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTe err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Zerof(t, result, "%v != %v", dt.String(), p.results[1]) @@ -718,7 +718,7 @@ func testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTe err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Zerof(t, result, "%v != %v", dt.String(), p.results[0]) @@ -751,7 +751,7 @@ func testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTe err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[1], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[1], ctor) require.NoError(t, err) require.Zerof(t, result, "%v != %v", dt.String(), p.results[1]) @@ -761,7 +761,7 @@ func testMultiArgsAggFunc(t *testing.T, ctx sessionctx.Context, p multiArgsAggTe err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt = resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err = dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[0], ctor) + result, err = dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[0], ctor) require.NoError(t, err) require.Zero(t, result) } diff --git a/pkg/executor/aggfuncs/builder.go b/pkg/executor/aggfuncs/builder.go index 487f5a56626aa..ab759e9cf6764 100644 --- a/pkg/executor/aggfuncs/builder.go +++ b/pkg/executor/aggfuncs/builder.go @@ -704,7 +704,7 @@ func buildLeadLag(ctx sessionctx.Context, aggFuncDesc *aggregation.AggFuncDesc, if len(aggFuncDesc.Args) == 3 { defaultExpr = aggFuncDesc.Args[2] if et, ok := defaultExpr.(*expression.Constant); ok { - res, err1 := et.Value.ConvertTo(ctx.GetSessionVars().StmtCtx, aggFuncDesc.RetTp) + res, err1 := et.Value.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), aggFuncDesc.RetTp) if err1 == nil { defaultExpr = &expression.Constant{Value: res, RetType: aggFuncDesc.RetTp} } diff --git a/pkg/executor/aggfuncs/func_count_test.go b/pkg/executor/aggfuncs/func_count_test.go index ce9a45dbcf540..58b8f404bd7d8 100644 --- a/pkg/executor/aggfuncs/func_count_test.go +++ b/pkg/executor/aggfuncs/func_count_test.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/tidb/pkg/executor/aggfuncs" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" - 
"github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/mock" @@ -158,7 +157,7 @@ func TestMemCount(t *testing.T) { } func TestWriteTime(t *testing.T) { - tt, err := types.ParseDate(stmtctx.NewStmtCtx(), "2020-11-11") + tt, err := types.ParseDate(types.DefaultStmtNoWarningContext, "2020-11-11") require.NoError(t, err) buf := make([]byte, 16) diff --git a/pkg/executor/aggfuncs/func_group_concat.go b/pkg/executor/aggfuncs/func_group_concat.go index afe1d204f7fb9..c67a6d7805972 100644 --- a/pkg/executor/aggfuncs/func_group_concat.go +++ b/pkg/executor/aggfuncs/func_group_concat.go @@ -304,7 +304,7 @@ func (h topNRows) Len() int { func (h topNRows) Less(i, j int) bool { n := len(h.rows[i].byItems) for k := 0; k < n; k++ { - ret, err := h.rows[i].byItems[k].Compare(h.sctx.GetSessionVars().StmtCtx, h.rows[j].byItems[k], h.collators[k]) + ret, err := h.rows[i].byItems[k].Compare(h.sctx.GetSessionVars().StmtCtx.TypeCtx(), h.rows[j].byItems[k], h.collators[k]) if err != nil { h.err = err return false diff --git a/pkg/executor/aggfuncs/window_func_test.go b/pkg/executor/aggfuncs/window_func_test.go index 3c109aae4a95a..f657d01a89b49 100644 --- a/pkg/executor/aggfuncs/window_func_test.go +++ b/pkg/executor/aggfuncs/window_func_test.go @@ -76,7 +76,7 @@ func testWindowFunc(t *testing.T, p windowTest) { err = finalFunc.AppendFinalResult2Chunk(ctx, finalPr, resultChk) require.NoError(t, err) dt := resultChk.GetRow(0).GetDatum(0, desc.RetTp) - result, err := dt.Compare(ctx.GetSessionVars().StmtCtx, &p.results[i], collate.GetCollator(desc.RetTp.GetCollate())) + result, err := dt.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &p.results[i], collate.GetCollator(desc.RetTp.GetCollate())) require.NoError(t, err) require.Equal(t, 0, result) resultChk.Reset() diff --git a/pkg/executor/aggregate/BUILD.bazel b/pkg/executor/aggregate/BUILD.bazel index a60265d2b2a1e..ec7cf5a38c85d 100644 --- a/pkg/executor/aggregate/BUILD.bazel +++ b/pkg/executor/aggregate/BUILD.bazel @@ -24,6 +24,7 @@ go_library( "//pkg/sessionctx/stmtctx", "//pkg/sessionctx/variable", "//pkg/types", + "//pkg/util", "//pkg/util/channel", "//pkg/util/chunk", "//pkg/util/codec", diff --git a/pkg/executor/aggregate/agg_util.go b/pkg/executor/aggregate/agg_util.go index fadc4cdb61c6d..9b7a75edb8acc 100644 --- a/pkg/executor/aggregate/agg_util.go +++ b/pkg/executor/aggregate/agg_util.go @@ -22,7 +22,6 @@ import ( "sync/atomic" "time" - "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/executor/aggfuncs" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" @@ -30,6 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/execdetails" @@ -59,8 +59,8 @@ func closeBaseExecutor(b *exec.BaseExecutor) { } func recoveryHashAgg(output chan *AfFinalResult, r interface{}) { - err := errors.Errorf("%v", r) - output <- &AfFinalResult{err: errors.Errorf("%v", r)} + err := util.GetRecoverError(r) + output <- &AfFinalResult{err: err} logutil.BgLogger().Error("parallel hash aggregation panicked", zap.Error(err), zap.Stack("stack")) } diff --git a/pkg/executor/analyze.go b/pkg/executor/analyze.go index 2ef3d3ee5d21b..c878d52b61c31 100644 --- a/pkg/executor/analyze.go +++ 
b/pkg/executor/analyze.go @@ -21,7 +21,6 @@ import ( "net" "strconv" "strings" - "sync/atomic" "time" "github.com/pingcap/errors" @@ -43,8 +42,8 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tipb/go-tipb" "go.uber.org/zap" @@ -110,8 +109,7 @@ func (e *AnalyzeExec) Next(ctx context.Context, _ *chunk.Chunk) error { // Start workers with channel to collect results. taskCh := make(chan *analyzeTask, concurrency) - resultChLen := min(concurrency*2, len(tasks)) - resultsCh := make(chan *statistics.AnalyzeResults, resultChLen) + resultsCh := make(chan *statistics.AnalyzeResults, 1) for i := 0; i < concurrency; i++ { e.wg.Run(func() { e.analyzeWorker(taskCh, resultsCh) }) } @@ -291,7 +289,7 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error { } } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "REPLACE INTO mysql.analyze_options (table_id,sample_num,sample_rate,buckets,topn,column_choice,column_ids) VALUES ") + sqlescape.MustFormatSQL(sql, "REPLACE INTO mysql.analyze_options (table_id,sample_num,sample_rate,buckets,topn,column_choice,column_ids) VALUES ") idx := 0 for _, opts := range toSaveMap { sampleNum := opts.RawOpts[ast.AnalyzeOptNumSamples] @@ -310,9 +308,9 @@ func (e *AnalyzeExec) saveV2AnalyzeOpts() error { colIDs[i] = strconv.FormatInt(colInfo.ID, 10) } colIDStrs := strings.Join(colIDs, ",") - sqlexec.MustFormatSQL(sql, "(%?,%?,%?,%?,%?,%?,%?)", opts.PhyTableID, sampleNum, sampleRate, buckets, topn, colChoice, colIDStrs) + sqlescape.MustFormatSQL(sql, "(%?,%?,%?,%?,%?,%?,%?)", opts.PhyTableID, sampleNum, sampleRate, buckets, topn, colChoice, colIDStrs) if idx < len(toSaveMap)-1 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } idx++ } @@ -398,10 +396,10 @@ func (e *AnalyzeExec) handleResultsError( } else { finishJobWithLog(e.Ctx(), results.Job, nil) } - if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { - finishJobWithLog(e.Ctx(), results.Job, exeerrors.ErrQueryInterrupted) + if err := e.Ctx().GetSessionVars().SQLKiller.HandleSignal(); err != nil { + finishJobWithLog(e.Ctx(), results.Job, err) results.DestroyAndPutToPool() - return errors.Trace(exeerrors.ErrQueryInterrupted) + return err } results.DestroyAndPutToPool() } @@ -424,7 +422,7 @@ func (e *AnalyzeExec) handleResultsErrorWithConcurrency(ctx context.Context, sta saveResultsCh := make(chan *statistics.AnalyzeResults, partitionStatsConcurrency) errCh := make(chan error, partitionStatsConcurrency) for i := 0; i < partitionStatsConcurrency; i++ { - worker := newAnalyzeSaveStatsWorker(saveResultsCh, subSctxs[i], errCh, &e.Ctx().GetSessionVars().Killed) + worker := newAnalyzeSaveStatsWorker(saveResultsCh, subSctxs[i], errCh, &e.Ctx().GetSessionVars().SQLKiller) ctx1 := kv.WithInternalSourceType(context.Background(), kv.InternalTxnStats) wg.Run(func() { worker.run(ctx1, e.Ctx().GetSessionVars().EnableAnalyzeSnapshot) @@ -434,9 +432,9 @@ func (e *AnalyzeExec) handleResultsErrorWithConcurrency(ctx context.Context, sta panicCnt := 0 var err error for panicCnt < statsConcurrency { - if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { + if err := e.Ctx().GetSessionVars().SQLKiller.HandleSignal(); err != nil { close(saveResultsCh) - return errors.Trace(exeerrors.ErrQueryInterrupted) + return err } results, 
ok := <-resultsCh if !ok { diff --git a/pkg/executor/analyze_col.go b/pkg/executor/analyze_col.go index 7d8cdc7994ef7..44ca539701227 100644 --- a/pkg/executor/analyze_col.go +++ b/pkg/executor/analyze_col.go @@ -19,10 +19,8 @@ import ( "fmt" "math" "strings" - "sync/atomic" "time" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/distsql" "github.com/pingcap/tidb/pkg/domain" @@ -36,7 +34,6 @@ import ( "github.com/pingcap/tidb/pkg/tablecodec" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/ranger" "github.com/pingcap/tipb/go-tipb" @@ -179,8 +176,8 @@ func (e *AnalyzeColumnsExec) buildStats(ranges []*ranger.Range, needExtStats boo dom := domain.GetDomain(e.ctx) dom.SysProcTracker().KillSysProcess(dom.GetAutoAnalyzeProcID()) }) - if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 { - return nil, nil, nil, nil, nil, errors.Trace(exeerrors.ErrQueryInterrupted) + if err := e.ctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return nil, nil, nil, nil, nil, err } failpoint.Inject("mockSlowAnalyzeV1", func() { time.Sleep(1000 * time.Second) diff --git a/pkg/executor/analyze_col_v2.go b/pkg/executor/analyze_col_v2.go index d46360e3d725d..6faa8e0f529a4 100644 --- a/pkg/executor/analyze_col_v2.go +++ b/pkg/executor/analyze_col_v2.go @@ -19,7 +19,6 @@ import ( stderrors "errors" "math" "sort" - "sync/atomic" "time" "github.com/pingcap/errors" @@ -40,7 +39,6 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/ranger" @@ -277,8 +275,8 @@ func (e *AnalyzeColumnsExecV2) buildSamplingStats( sc := e.ctx.GetSessionVars().StmtCtx // Start workers to merge the result from collectors. - mergeResultCh := make(chan *samplingMergeResult, samplingStatsConcurrency) - mergeTaskCh := make(chan []byte, samplingStatsConcurrency) + mergeResultCh := make(chan *samplingMergeResult, 1) + mergeTaskCh := make(chan []byte, 1) var taskEg errgroup.Group // Start read data from resultHandler and send them to mergeTaskCh. 
taskEg.Go(func() (err error) { @@ -888,8 +886,8 @@ func readDataAndSendTask(ctx sessionctx.Context, handler *tableResultHandler, me dom := domain.GetDomain(ctx) dom.SysProcTracker().KillSysProcess(dom.GetAutoAnalyzeProcID()) }) - if atomic.LoadUint32(&ctx.GetSessionVars().Killed) == 1 { - return errors.Trace(exeerrors.ErrQueryInterrupted) + if err := ctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return err } failpoint.Inject("mockSlowAnalyzeV2", func() { time.Sleep(1000 * time.Second) diff --git a/pkg/executor/analyze_idx.go b/pkg/executor/analyze_idx.go index 64531547ee5eb..67f18e45a317e 100644 --- a/pkg/executor/analyze_idx.go +++ b/pkg/executor/analyze_idx.go @@ -17,7 +17,6 @@ package executor import ( "context" "math" - "sync/atomic" "time" "github.com/pingcap/errors" @@ -31,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/ranger" "github.com/pingcap/tipb/go-tipb" @@ -203,8 +201,8 @@ func (e *AnalyzeIndexExec) buildStatsFromResult(result distsql.SelectResult, nee dom := domain.GetDomain(e.ctx) dom.SysProcTracker().KillSysProcess(dom.GetAutoAnalyzeProcID()) }) - if atomic.LoadUint32(&e.ctx.GetSessionVars().Killed) == 1 { - return nil, nil, nil, nil, errors.Trace(exeerrors.ErrQueryInterrupted) + if err := e.ctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return nil, nil, nil, nil, err } failpoint.Inject("mockSlowAnalyzeIndex", func() { time.Sleep(1000 * time.Second) diff --git a/pkg/executor/analyze_test.go b/pkg/executor/analyze_test.go index 9cbd34c415772..d97003a305308 100644 --- a/pkg/executor/analyze_test.go +++ b/pkg/executor/analyze_test.go @@ -37,7 +37,7 @@ import ( func checkHistogram(sc *stmtctx.StatementContext, hg *statistics.Histogram) (bool, error) { for i := 0; i < len(hg.Buckets); i++ { lower, upper := hg.GetLower(i), hg.GetUpper(i) - cmp, err := upper.Compare(sc, lower, collate.GetBinaryCollator()) + cmp, err := upper.Compare(sc.TypeCtx(), lower, collate.GetBinaryCollator()) if cmp < 0 || err != nil { return false, err } @@ -45,7 +45,7 @@ func checkHistogram(sc *stmtctx.StatementContext, hg *statistics.Histogram) (boo continue } previousUpper := hg.GetUpper(i - 1) - cmp, err = lower.Compare(sc, previousUpper, collate.GetBinaryCollator()) + cmp, err = lower.Compare(sc.TypeCtx(), previousUpper, collate.GetBinaryCollator()) if cmp <= 0 || err != nil { return false, err } diff --git a/pkg/executor/analyze_utils.go b/pkg/executor/analyze_utils.go index 089f34bd4c7b4..5a917177482a3 100644 --- a/pkg/executor/analyze_utils.go +++ b/pkg/executor/analyze_utils.go @@ -17,7 +17,6 @@ package executor import ( "context" "strconv" - "strings" "sync" "github.com/pingcap/errors" @@ -25,7 +24,6 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" - "github.com/pingcap/tidb/pkg/util/memory" "go.uber.org/atomic" ) @@ -59,9 +57,6 @@ func getAnalyzePanicErr(r interface{}) error { if msg == globalPanicAnalyzeMemoryExceed { return errAnalyzeOOM } - if strings.Contains(msg, memory.PanicMemoryExceedWarnMsg) { - return errors.Errorf("%s, %s", msg, errAnalyzeOOM) - } } if err, ok := r.(error); ok { if err.Error() == globalPanicAnalyzeMemoryExceed { diff --git a/pkg/executor/analyze_utils_test.go b/pkg/executor/analyze_utils_test.go index 
eb764ecb3f6d2..72202b43a0e9c 100644 --- a/pkg/executor/analyze_utils_test.go +++ b/pkg/executor/analyze_utils_test.go @@ -18,12 +18,12 @@ import ( "fmt" "testing" - "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) // https://github.com/pingcap/tidb/issues/45690 func TestGetAnalyzePanicErr(t *testing.T) { - errMsg := fmt.Sprintf("%s", getAnalyzePanicErr(memory.PanicMemoryExceedWarnMsg)) + errMsg := fmt.Sprintf("%s", getAnalyzePanicErr(exeerrors.ErrMemoryExceedForQuery.GenWithStackByArgs(123))) require.NotContains(t, errMsg, `%!(EXTRA`) } diff --git a/pkg/executor/analyze_worker.go b/pkg/executor/analyze_worker.go index bb100327a76de..d92df2a1da266 100644 --- a/pkg/executor/analyze_worker.go +++ b/pkg/executor/analyze_worker.go @@ -16,15 +16,13 @@ package executor import ( "context" - "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/statistics/handle/util" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "go.uber.org/zap" ) @@ -32,19 +30,19 @@ type analyzeSaveStatsWorker struct { resultsCh <-chan *statistics.AnalyzeResults sctx sessionctx.Context errCh chan<- error - killed *uint32 + killer *sqlkiller.SQLKiller } func newAnalyzeSaveStatsWorker( resultsCh <-chan *statistics.AnalyzeResults, sctx sessionctx.Context, errCh chan<- error, - killed *uint32) *analyzeSaveStatsWorker { + killer *sqlkiller.SQLKiller) *analyzeSaveStatsWorker { worker := &analyzeSaveStatsWorker{ resultsCh: resultsCh, sctx: sctx, errCh: errCh, - killed: killed, + killer: killer, } return worker } @@ -57,8 +55,8 @@ func (worker *analyzeSaveStatsWorker) run(ctx context.Context, analyzeSnapshot b } }() for results := range worker.resultsCh { - if atomic.LoadUint32(worker.killed) == 1 { - worker.errCh <- errors.Trace(exeerrors.ErrQueryInterrupted) + if err := worker.killer.HandleSignal(); err != nil { + worker.errCh <- err return } statsHandle := domain.GetDomain(worker.sctx).StatsHandle() diff --git a/pkg/executor/benchmark_test.go b/pkg/executor/benchmark_test.go index 54b7b5f71027d..458ca9250c0f2 100644 --- a/pkg/executor/benchmark_test.go +++ b/pkg/executor/benchmark_test.go @@ -1132,6 +1132,16 @@ func BenchmarkHashJoinExec(b *testing.B) { b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { benchmarkHashJoinExecWithCase(b, cas) }) + + cols = []*types.FieldType{ + types.NewFieldType(mysql.TypeLonglong), + } + cas = defaultHashJoinTestCase(cols, 0, false) + cas.keyIdx = []int{0} + cas.disk = true + b.Run(fmt.Sprintf("%v", cas), func(b *testing.B) { + benchmarkHashJoinExecWithCase(b, cas) + }) } func BenchmarkOuterHashJoinExec(b *testing.B) { diff --git a/pkg/executor/brie.go b/pkg/executor/brie.go index 44e91899795c9..e40493c3f44e2 100644 --- a/pkg/executor/brie.go +++ b/pkg/executor/brie.go @@ -227,7 +227,7 @@ func (bq *brieQueue) clearTask(sc *stmtctx.StatementContext) { bq.tasks.Range(func(key, value interface{}) bool { item := value.(*brieQueueItem) - if d := currTime.Sub(sc, &item.info.finishTime); d.Compare(outdatedDuration) > 0 { + if d := currTime.Sub(sc.TypeCtx(), &item.info.finishTime); d.Compare(outdatedDuration) > 0 { bq.tasks.Delete(key) } return true @@ -236,7 +236,7 @@ func (bq *brieQueue) clearTask(sc *stmtctx.StatementContext) { func (b *executorBuilder) parseTSString(ts string) 
(uint64, error) { sc := stmtctx.NewStmtCtxWithTimeZone(b.ctx.GetSessionVars().Location()) - t, err := types.ParseTime(sc, ts, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(sc.TypeCtx(), ts, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, err } @@ -549,7 +549,7 @@ func (e *BRIEExec) Next(ctx context.Context, req *chunk.Chunk) error { for { select { case <-ticker.C: - if atomic.LoadUint32(&e.Ctx().GetSessionVars().Killed) == 1 { + if e.Ctx().GetSessionVars().SQLKiller.HandleSignal() == exeerrors.ErrQueryInterrupted { bq.cancelTask(taskID) return } diff --git a/pkg/executor/brie_test.go b/pkg/executor/brie_test.go index 0fbe566d02422..c63bb33ea414e 100644 --- a/pkg/executor/brie_test.go +++ b/pkg/executor/brie_test.go @@ -78,7 +78,7 @@ func TestFetchShowBRIE(t *testing.T) { p.SetParserConfig(parser.ParserConfig{EnableWindowFunction: true, EnableStrictDoubleTypeCheck: true}) stmt, err := p.ParseOneStmt("show backups", "", "") require.NoError(t, err) - plan, _, err := core.BuildLogicalPlanForTest(ctx, sctx, stmt, infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable(), core.MockView()})) + plan, err := core.BuildLogicalPlanForTest(ctx, sctx, stmt, infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable(), core.MockView()})) require.NoError(t, err) schema := plan.Schema() diff --git a/pkg/executor/builder.go b/pkg/executor/builder.go index 8a697a9c5577d..3d19815168d95 100644 --- a/pkg/executor/builder.go +++ b/pkg/executor/builder.go @@ -562,7 +562,9 @@ func buildIdxColsConcatHandleCols(tblInfo *model.TableInfo, indexInfo *model.Ind if tblInfo.IsCommonHandle { for _, c := range pkCols { - columns = append(columns, tblInfo.Columns[c.Offset]) + if model.FindColumnInfo(columns, c.Name.L) == nil { + columns = append(columns, tblInfo.Columns[c.Offset]) + } } return columns } @@ -610,12 +612,12 @@ func (b *executorBuilder) buildRecoverIndex(v *plannercore.RecoverIndex) exec.Ex physicalID: t.Meta().ID, } sessCtx := e.Ctx().GetSessionVars().StmtCtx - e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns) + e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, e.columns) return e } func buildHandleColsForExec(sctx *stmtctx.StatementContext, tblInfo *model.TableInfo, - idxInfo *model.IndexInfo, allColInfo []*model.ColumnInfo) plannercore.HandleCols { + allColInfo []*model.ColumnInfo) plannercore.HandleCols { if !tblInfo.IsCommonHandle { extraColPos := len(allColInfo) - 1 intCol := &expression.Column{ @@ -633,8 +635,12 @@ func buildHandleColsForExec(sctx *stmtctx.StatementContext, tblInfo *model.Table } } pkIdx := tables.FindPrimaryIndex(tblInfo) - for i, c := range pkIdx.Columns { - tblCols[c.Offset].Index = len(idxInfo.Columns) + i + for _, c := range pkIdx.Columns { + for j, colInfo := range allColInfo { + if colInfo.Name.L == c.Name.L { + tblCols[c.Offset].Index = j + } + } } return plannercore.NewCommonHandleCols(sctx, tblInfo, pkIdx, tblCols) } @@ -671,7 +677,7 @@ func (b *executorBuilder) buildCleanupIndex(v *plannercore.CleanupIndex) exec.Ex batchSize: 20000, } sessCtx := e.Ctx().GetSessionVars().StmtCtx - e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, index.Meta(), e.columns) + e.handleCols = buildHandleColsForExec(sessCtx, tblInfo, e.columns) return e } @@ -4677,7 +4683,7 @@ func buildKvRangesForIndexJoin(ctx sessionctx.Context, tableID, indexID int64, l memTracker.Consume(int64(2 * cap(kvRanges[0].StartKey) * len(kvRanges))) } if 
len(tmpDatumRanges) != 0 && memTracker != nil { - memTracker.Consume(2 * int64(len(tmpDatumRanges)) * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges))) + memTracker.Consume(2 * types.EstimatedMemUsage(tmpDatumRanges[0].LowVal, len(tmpDatumRanges))) } if cwc == nil { slices.SortFunc(kvRanges, func(i, j kv.KeyRange) int { diff --git a/pkg/executor/chunk_size_control_test.go b/pkg/executor/chunk_size_control_test.go index e49b3491d2f5e..ac4e9b78f1d93 100644 --- a/pkg/executor/chunk_size_control_test.go +++ b/pkg/executor/chunk_size_control_test.go @@ -73,9 +73,9 @@ func manipulateCluster(cluster testutils.Cluster, splitKeys [][]byte) []uint64 { if len(splitKeys) == 0 { return nil } - region, _, _ := cluster.GetRegionByKey(splitKeys[0]) + region, _, _, _ := cluster.GetRegionByKey(splitKeys[0]) for _, key := range splitKeys { - if r, _, _ := cluster.GetRegionByKey(key); r.Id != region.Id { + if r, _, _, _ := cluster.GetRegionByKey(key); r.Id != region.Id { panic("all split keys should belong to the same region") } } diff --git a/pkg/executor/cluster_table_test.go b/pkg/executor/cluster_table_test.go index 03b929bac1583..373414b13d91d 100644 --- a/pkg/executor/cluster_table_test.go +++ b/pkg/executor/cluster_table_test.go @@ -19,7 +19,6 @@ import ( "fmt" "net" "os" - "strconv" "testing" "time" @@ -308,120 +307,6 @@ func TestSQLDigestTextRetriever(t *testing.T) { require.Equal(t, "", r.SQLDigestsMap[updateDigest.String()]) } -func TestFunctionDecodeSQLDigests(t *testing.T) { - store, dom := testkit.CreateMockStoreAndDomain(t) - srv := createRPCServer(t, dom) - defer srv.Stop() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustExec("set global tidb_enable_stmt_summary = 1") - tk.MustQuery("select @@global.tidb_enable_stmt_summary").Check(testkit.Rows("1")) - tk.MustExec("drop table if exists test_func_decode_sql_digests") - tk.MustExec("create table test_func_decode_sql_digests(id int primary key, v int)") - - q1 := "begin" - norm1, digest1 := parser.NormalizeDigest(q1) - q2 := "select @@tidb_current_ts" - norm2, digest2 := parser.NormalizeDigest(q2) - q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update" - norm3, digest3 := parser.NormalizeDigest(q3) - - // TIDB_DECODE_SQL_DIGESTS function doesn't actually do "decoding", instead it queries `statements_summary` and it's - // variations for the corresponding statements. - // Execute the statements so that the queries will be saved into statements_summary table. - tk.MustExec(q1) - // Save the ts to query the transaction from tidb_trx. - ts, err := strconv.ParseUint(tk.MustQuery(q2).Rows()[0][0].(string), 10, 64) - require.NoError(t, err) - require.Greater(t, ts, uint64(0)) - tk.MustExec(q3) - tk.MustExec("rollback") - - // Test statements truncating. - decoded := fmt.Sprintf(`["%s","%s","%s"]`, norm1, norm2, norm3) - digests := fmt.Sprintf(`["%s","%s","%s"]`, digest1, digest2, digest3) - tk.MustQuery("select tidb_decode_sql_digests(?, 0)", digests).Check(testkit.Rows(decoded)) - // The three queries are shorter than truncate length, equal to truncate length and longer than truncate length respectively. - tk.MustQuery("select tidb_decode_sql_digests(?, ?)", digests, len(norm2)).Check(testkit.Rows( - "[\"begin\",\"select @@tidb_current_ts\",\"select `id` , `v` from `...\"]")) - - // Empty array. 
- tk.MustQuery("select tidb_decode_sql_digests('[]')").Check(testkit.Rows("[]")) - - // NULL - tk.MustQuery("select tidb_decode_sql_digests(null)").Check(testkit.Rows("")) - - // Array containing wrong types and not-existing digests (maps to null). - tk.MustQuery("select tidb_decode_sql_digests(?)", fmt.Sprintf(`["%s",1,null,"%s",{"a":1},[2],"%s","","abcde"]`, digest1, digest2, digest3)). - Check(testkit.Rows(fmt.Sprintf(`["%s",null,null,"%s",null,null,"%s",null,null]`, norm1, norm2, norm3))) - - // Not JSON array (throws warnings) - tk.MustQuery(`select tidb_decode_sql_digests('{"a":1}')`).Check(testkit.Rows("")) - tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}'`)) - tk.MustQuery(`select tidb_decode_sql_digests('aabbccdd')`).Check(testkit.Rows("")) - tk.MustQuery(`show warnings`).Check(testkit.Rows(`Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd'`)) - - // Invalid argument count. - tk.MustGetErrCode("select tidb_decode_sql_digests('a', 1, 2)", 1582) - tk.MustGetErrCode("select tidb_decode_sql_digests()", 1582) -} - -func TestFunctionDecodeSQLDigestsPrivilege(t *testing.T) { - store, dom := testkit.CreateMockStoreAndDomain(t) - srv := createRPCServer(t, dom) - defer srv.Stop() - - dropUserTk := testkit.NewTestKit(t, store) - require.NoError(t, dropUserTk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - - tk := testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustExec("create user 'testuser'@'localhost'") - defer dropUserTk.MustExec("drop user 'testuser'@'localhost'") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "testuser", Hostname: "localhost"}, nil, nil, nil)) - tk.MustGetErrMsg("select tidb_decode_sql_digests('[\"aa\"]')", "[expression:1227]Access denied; you need (at least one of) the PROCESS privilege(s) for this operation") - - tk = testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustExec("create user 'testuser2'@'localhost'") - defer dropUserTk.MustExec("drop user 'testuser2'@'localhost'") - tk.MustExec("grant process on *.* to 'testuser2'@'localhost'") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "testuser2", Hostname: "localhost"}, nil, nil, nil)) - tk.MustExec("select tidb_decode_sql_digests('[\"aa\"]')") -} - -func TestFunctionEncodeSQLDigest(t *testing.T) { - store, dom := testkit.CreateMockStoreAndDomain(t) - srv := createRPCServer(t, dom) - defer srv.Stop() - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustExec("drop table if exists test_func_encode_sql_digest") - tk.MustExec("create table test_func_encode_sql_digest(id int primary key, v int)") - - q1 := "begin" - digest1 := parser.DigestHash(q1) - q2 := "select @@tidb_current_ts" - digest2 := parser.DigestHash(q2) - q3 := "select id, v from test_func_decode_sql_digests where id = 1 for update" - digest3 := parser.DigestHash(q3) - - tk.MustQuery(fmt.Sprintf("select tidb_encode_sql_digest(\"%s\")", q1)).Check(testkit.Rows(digest1.String())) - tk.MustQuery(fmt.Sprintf("select tidb_encode_sql_digest(\"%s\")", q2)).Check(testkit.Rows(digest2.String())) - tk.MustQuery(fmt.Sprintf("select tidb_encode_sql_digest(\"%s\")", 
q3)).Check(testkit.Rows(digest3.String())) - - tk.MustQuery("select tidb_encode_sql_digest(null)").Check(testkit.Rows("")) - tk.MustGetErrCode("select tidb_encode_sql_digest()", 1582) - - tk.MustQuery("select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1;')").Check(testkit.Rows("1")) - tk.MustQuery("select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1 ;')").Check(testkit.Rows("1")) - tk.MustQuery("select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 2 ;')").Check(testkit.Rows("1")) -} - func prepareLogs(t *testing.T, logData []string, fileNames []string) { for i, log := range logData { f, err := os.OpenFile(fileNames[i], os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) diff --git a/pkg/executor/compact_table_test.go b/pkg/executor/compact_table_test.go index eae058bf4f78b..6889692b4a812 100644 --- a/pkg/executor/compact_table_test.go +++ b/pkg/executor/compact_table_test.go @@ -55,80 +55,6 @@ func withMockTiFlash(nodes int) mockstore.MockTiKVStoreOption { ) } -func TestCompactUnknownTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - err := tk.ExecToErr(`alter table test compact tiflash replica;`) - require.Equal(t, "[planner:1046]No database selected", err.Error()) - - err = tk.ExecToErr(`alter table test.foo compact tiflash replica;`) - require.Equal(t, "[schema:1146]Table 'test.foo' doesn't exist", err.Error()) - - tk.MustExec("use test") - err = tk.ExecToErr(`alter table bar compact;`) - require.Equal(t, "[schema:1146]Table 'test.bar' doesn't exist", err.Error()) -} - -func TestCompactTableNoTiFlashReplica(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("create table t(a int)") - tk.MustExec(`alter table t compact tiflash replica;`) - tk.MustQuery(`show warnings;`).Check(testkit.Rows( - `Warning 1105 compact skipped: no tiflash replica in the table`, - )) - - tk.MustExec(`alter table test.t compact;`) - tk.MustQuery(`show warnings;`).Check(testkit.Rows( - `Warning 1105 compact skipped: no tiflash replica in the table`, - )) - - tk = testkit.NewTestKit(t, store) - tk.MustExec(`alter table test.t compact;`) - tk.MustQuery(`show warnings;`).Check(testkit.Rows( - `Warning 1105 compact skipped: no tiflash replica in the table`, - )) -} - -func TestCompactTableNoPartition(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("create table t(a int)") - _, err := tk.Exec("alter table t compact partition p1,p2 tiflash replica;") - require.NotNil(t, err) - require.Equal(t, "table:t is not a partition table, but user specify partition name list:[p1 p2]", err.Error()) -} - -func TestCompactTablePartitionInvalid(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec(` - CREATE TABLE t ( - id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, - fname VARCHAR(25) NOT NULL, - lname VARCHAR(25) NOT NULL, - store_id INT NOT NULL, - department_id INT NOT NULL - ) - PARTITION BY RANGE(id) ( - PARTITION p0 VALUES LESS THAN (5), - PARTITION p1 VALUES LESS THAN (10), - PARTITION p2 VALUES LESS THAN (15), - PARTITION p3 VALUES LESS THAN MAXVALUE - ); - `) - _, err := tk.Exec("alter table t compact partition p1,p2,p4 tiflash replica;") - require.NotNil(t, err) - require.Equal(t, "[table:1735]Unknown partition 'p4' in table 't'", err.Error()) -} - 
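The recurring change in the analyze paths above, and in executor.go further below, replaces polling of the raw `Killed` flag (`atomic.LoadUint32(&...Killed) == 1` followed by a hand-wrapped `exeerrors.ErrQueryInterrupted`) with a single `SQLKiller.HandleSignal()` call that returns the error directly. A minimal sketch of that polling pattern follows; only the names `Signal`, `ConnID`, `HandleSignal`, `SendKillSignal`, and `Reset` come from this diff, the internals are illustrative assumptions:

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// killSignal stands in for sqlkiller's signal constants; the concrete
// values are assumptions for illustration.
type killSignal = uint32

const (
	notKilled        killSignal = 0
	queryInterrupted killSignal = 1 // plays the role of sqlkiller.QueryInterrupted
)

var errQueryInterrupted = errors.New("query execution was interrupted")

// SQLKiller sketches the struct the diff wires into MemTracker and LockCtx:
// Signal is the atomic flag (cf. lockCtx.Killed = &seVars.SQLKiller.Signal)
// and HandleSignal converts it into a typed error.
type SQLKiller struct {
	Signal killSignal
	ConnID uint64
}

// SendKillSignal is called from the KILL path; workers never write the flag.
func (k *SQLKiller) SendKillSignal(s killSignal) { atomic.StoreUint32(&k.Signal, s) }

// Reset clears the flag between statements, as ResetContextOfStmt does below.
func (k *SQLKiller) Reset() { atomic.StoreUint32(&k.Signal, notKilled) }

// HandleSignal maps the raw flag to a typed error so callers can simply
// `return err` instead of wrapping exeerrors.ErrQueryInterrupted themselves.
func (k *SQLKiller) HandleSignal() error {
	if atomic.LoadUint32(&k.Signal) == queryInterrupted {
		return errQueryInterrupted
	}
	return nil
}

func main() {
	killer := &SQLKiller{ConnID: 1}
	killer.SendKillSignal(queryInterrupted) // e.g. triggered by a KILL statement
	for { // worker loop: poll once per batch of work
		if err := killer.HandleSignal(); err != nil {
			fmt.Println("worker exits:", err) // callers above simply propagate err
			return
		}
	}
}
```

Centralizing the signal-to-error mapping in the killer is what lets each hunk drop its `sync/atomic` and `exeerrors` imports and propagate the returned error unchanged.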
func TestCompactTableTooBusy(t *testing.T) { mocker := newCompactRequestMocker(t) mocker.MockFrom(`tiflash0/#1`, func(req *kvrpcpb.CompactRequest) (*kvrpcpb.CompactResponse, error) { diff --git a/pkg/executor/compiler.go b/pkg/executor/compiler.go index fd724588763b6..1f1bc6c690c4b 100644 --- a/pkg/executor/compiler.go +++ b/pkg/executor/compiler.go @@ -16,9 +16,7 @@ package executor import ( "context" - "strings" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/metrics" @@ -29,8 +27,8 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/sessiontxn/staleread" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/tracing" "go.uber.org/zap" ) @@ -50,10 +48,14 @@ func (c *Compiler) Compile(ctx context.Context, stmtNode ast.StmtNode) (_ *ExecS if r == nil { return } - if str, ok := r.(string); !ok || !strings.Contains(str, memory.PanicMemoryExceedWarnMsg) { + if recoveredErr, ok := r.(error); !ok || !(exeerrors.ErrMemoryExceedForQuery.Equal(recoveredErr) || + exeerrors.ErrMemoryExceedForInstance.Equal(recoveredErr) || + exeerrors.ErrQueryInterrupted.Equal(recoveredErr) || + exeerrors.ErrMaxExecTimeExceeded.Equal(recoveredErr)) { panic(r) + } else { + err = recoveredErr } - err = errors.Errorf("%v", r) logutil.Logger(ctx).Error("compile SQL panic", zap.String("SQL", stmtNode.Text()), zap.Stack("stack"), zap.Any("recover", r)) }() diff --git a/pkg/executor/cte.go b/pkg/executor/cte.go index 9928bc652b639..26c3f1e7cdee8 100644 --- a/pkg/executor/cte.go +++ b/pkg/executor/cte.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/variable" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/cteutil" @@ -335,7 +336,7 @@ func (p *cteProducer) produce(ctx context.Context, cteExec *CTEExec) (err error) func (p *cteProducer) computeSeedPart(ctx context.Context) (err error) { defer func() { if r := recover(); r != nil && err == nil { - err = errors.Errorf("%v", r) + err = util.GetRecoverError(r) } }() failpoint.Inject("testCTESeedPanic", nil) @@ -374,7 +375,7 @@ func (p *cteProducer) computeSeedPart(ctx context.Context) (err error) { func (p *cteProducer) computeRecursivePart(ctx context.Context) (err error) { defer func() { if r := recover(); r != nil && err == nil { - err = errors.Errorf("%v", r) + err = util.GetRecoverError(r) } }() failpoint.Inject("testCTERecursivePanic", nil) @@ -633,13 +634,10 @@ func (p *cteProducer) checkHasDup(probeKey uint64, curChk *chunk.Chunk, storage cteutil.Storage, hashTbl baseHashTable) (hasDup bool, err error) { - ptrs := hashTbl.Get(probeKey) + entry := hashTbl.Get(probeKey) - if len(ptrs) == 0 { - return false, nil - } - - for _, ptr := range ptrs { + for ; entry != nil; entry = entry.next { + ptr := entry.ptr var matchedRow chunk.Row if curChk != nil { matchedRow = curChk.GetRow(int(ptr.RowIdx)) diff --git a/pkg/executor/cte_test.go b/pkg/executor/cte_test.go index 4133e4401ed97..a8499402ab43e 100644 --- a/pkg/executor/cte_test.go +++ b/pkg/executor/cte_test.go @@ -27,327 +27,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestBasicCTE(t *testing.T) { - store := testkit.CreateMockStore(t) - - 
tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - rows := tk.MustQuery("with recursive cte1 as (" + - "select 1 c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < 5) " + - "select * from cte1") - rows.Check(testkit.Rows("1", "2", "3", "4", "5")) - - // Two seed parts. - rows = tk.MustQuery("with recursive cte1 as (" + - "select 1 c1 " + - "union all " + - "select 2 c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < 10) " + - "select * from cte1 order by c1") - rows.Check(testkit.Rows("1", "2", "2", "3", "3", "4", "4", "5", "5", "6", "6", "7", "7", "8", "8", "9", "9", "10", "10")) - - // Two recursive parts. - rows = tk.MustQuery("with recursive cte1 as (" + - "select 1 c1 " + - "union all " + - "select 2 c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < 3 " + - "union all " + - "select c1 + 2 c1 from cte1 where c1 < 5) " + - "select * from cte1 order by c1") - rows.Check(testkit.Rows("1", "2", "2", "3", "3", "3", "4", "4", "5", "5", "5", "6", "6")) - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(a int);") - tk.MustExec("insert into t1 values(1);") - tk.MustExec("insert into t1 values(2);") - rows = tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0) SELECT * FROM qn WHERE b=a);") - rows.Check(testkit.Rows("1")) - rows = tk.MustQuery("SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a );") - rows.Check(testkit.Rows("1", "2")) - - rows = tk.MustQuery("with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2;") - rows.Check(testkit.Rows("1 1", "2 1", "3 1", "4 1", "5 1")) -} - -func TestUnionDistinct(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - // Basic test. UNION/UNION ALL intersects. 
- rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2", "3")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union all select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2", "3")) - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int, c2 int);") - tk.MustExec("insert into t1 values(1, 1), (1, 2), (2, 2);") - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from t1) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2", "3")) - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec("insert into t1 values(1), (1), (1), (2), (2), (2);") - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 4) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2", "3", "4")) -} - -func TestCTEMaxRecursionDepth(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - tk.MustExec("set @@cte_max_recursion_depth = -1;") - err := tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 100) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value") - // If there is no recursive part, query runs ok. - rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) - rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) - - tk.MustExec("set @@cte_max_recursion_depth = 0;") - err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value") - err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value") - // If there is no recursive part, query runs ok. - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) - rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) - - tk.MustExec("set @@cte_max_recursion_depth = 1;") - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1;") - rows.Check(testkit.Rows("1")) - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1;") - rows.Check(testkit.Rows("1")) - err = tk.QueryToErr("with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 2) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 2 iterations. 
Try increasing @@cte_max_recursion_depth to a larger value") - // If there is no recursive part, query runs ok. - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) - rows = tk.MustQuery("with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1;") - rows.Check(testkit.Rows("1", "2")) -} - -func TestCTEWithLimit(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - // Basic recursive tests. - rows := tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1") - rows.Check(testkit.Rows("1", "2", "3", "4", "5")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 1) select * from cte1") - rows.Check(testkit.Rows("2", "3", "4", "5", "6")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 10) select * from cte1") - rows.Check(testkit.Rows("11", "12", "13", "14", "15")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 995) select * from cte1") - rows.Check(testkit.Rows("996", "997", "998", "999", "1000")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 6) select * from cte1;") - rows.Check(testkit.Rows("7", "8", "9", "10", "11")) - - // Test with cte_max_recursion_depth - tk.MustExec("set cte_max_recursion_depth=2;") - rows = tk.MustQuery("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;") - rows.Check(testkit.Rows("2")) - - err := tk.QueryToErr("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 3 iterations. Try increasing @@cte_max_recursion_depth to a larger value") - - tk.MustExec("set cte_max_recursion_depth=1000;") - rows = tk.MustQuery("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 996) select * from cte1;") - rows.Check(testkit.Rows("996", "997", "998", "999", "1000")) - - err = tk.QueryToErr("with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 997) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1001 iterations. Try increasing @@cte_max_recursion_depth to a larger value") - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1") - rows.Check(testkit.Rows()) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 10) select * from cte1") - rows.Check(testkit.Rows()) - - // Test join. - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 order by dt1.c1, dt2.c1;") - rows.Check(testkit.Rows("2 2", "2 3", "3 2", "3 3")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1 order by dt1.c1, dt1.c1;") - rows.Check(testkit.Rows("2 2", "3 3")) - - // Test subquery. 
- // Different with mysql, maybe it's mysql bug?(https://bugs.mysql.com/bug.php?id=103890&thanks=4) - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where c1 in (select 2);") - rows.Check(testkit.Rows("2")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 dt where c1 in (select c1 from cte1 where 1 = dt.c1 - 1);") - rows.Check(testkit.Rows("2")) - - // Test Apply. - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where cte1.c1 = (select dt1.c1 from cte1 dt1 where dt1.c1 = cte1.c1);") - rows.Check(testkit.Rows("2", "3")) - - // Recursive tests with table. - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec("insert into t1 values(1), (2), (3);") - - // Error: ERROR 1221 (HY000): Incorrect usage of UNION and LIMIT. - // Limit can only be at the end of SQL stmt. - err = tk.ExecToErr("with recursive cte1(c1) as (select c1 from t1 limit 1 offset 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1") - require.EqualError(t, err, "[planner:1221]Incorrect usage of UNION and LIMIT") - - // Basic non-recusive tests. - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 1 offset 1) select * from cte1") - rows.Check(testkit.Rows("2")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 0 offset 1) select * from cte1") - rows.Check(testkit.Rows()) - - rows = tk.MustQuery("with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 2 offset 0) select * from cte1") - rows.Check(testkit.Rows("1", "2")) - - // Test with table. - tk.MustExec("drop table if exists t1;") - insertStr := "insert into t1 values(0)" - for i := 1; i < 300; i++ { - insertStr += fmt.Sprintf(", (%d)", i) - } - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec(insertStr) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1) select * from cte1") - rows.Check(testkit.Rows("0")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1") - rows.Check(testkit.Rows("100")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 5 offset 100) select * from cte1") - rows.Check(testkit.Rows("100", "101", "102", "103", "104")) - - // Basic non-recursive tests. 
- rows = tk.MustQuery("with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1") - rows.Check(testkit.Rows("1", "2")) - - rows = tk.MustQuery("with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1") - rows.Check(testkit.Rows("1 1", "2 2")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1") - rows.Check(testkit.Rows()) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1") - rows.Check(testkit.Rows()) - - // rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select 2 limit 5 offset 100) select * from cte1") - // rows.Check(testkit.Rows("100", "101", "102", "103", "104")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1") - rows.Check(testkit.Rows("100", "101", "102")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1") - rows.Check(testkit.Rows("100 100", "101 101", "102 102")) - - // Test limit 0. - tk.MustExec("set cte_max_recursion_depth = 0;") - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec("insert into t1 values(0);") - rows = tk.MustQuery("with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 0) select * from cte1;") - rows.Check(testkit.Rows()) - // MySQL err: ERROR 1365 (22012): Division by 0. Because it gives error when computing 1/c1. - err = tk.QueryToErr("with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 1) select * from cte1;") - require.EqualError(t, err, "[executor:3636]Recursive query aborted after 1 iterations. 
Try increasing @@cte_max_recursion_depth to a larger value") - - tk.MustExec("set cte_max_recursion_depth = 1000;") - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 int);") - tk.MustExec("insert into t1 values(1), (2), (3);") - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3", "4")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3", "4", "5")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3", "4", "5", "6")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;") - rows.Check(testkit.Rows("4")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;") - rows.Check(testkit.Rows("4", "5")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;") - rows.Check(testkit.Rows("4", "5", "6")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;") - rows.Check(testkit.Rows("4", "5", "6", "7")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;") - rows.Check(testkit.Rows("5")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;") - rows.Check(testkit.Rows("5", "6")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;") - rows.Check(testkit.Rows("5", "6", "7")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;") - rows.Check(testkit.Rows("5", "6", "7", "8")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 2) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3", "2")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 2) 
select * from cte1;") - rows.Check(testkit.Rows("3", "2", "3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 2) select * from cte1;") - rows.Check(testkit.Rows("3", "2", "3", "4")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 3) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 3) select * from cte1;") - rows.Check(testkit.Rows("2")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 3) select * from cte1;") - rows.Check(testkit.Rows("2", "3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 3) select * from cte1;") - rows.Check(testkit.Rows("2", "3", "4")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 3) select * from cte1;") - rows.Check(testkit.Rows("2", "3", "4", "3")) - - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 4) select * from cte1;") - rows.Check(testkit.Rows()) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 4) select * from cte1;") - rows.Check(testkit.Rows("3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 4) select * from cte1;") - rows.Check(testkit.Rows("3", "4")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 4) select * from cte1;") - rows.Check(testkit.Rows("3", "4", "3")) - rows = tk.MustQuery("with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1;") - rows.Check(testkit.Rows("3", "4", "3", "4")) -} - func TestSpillToDisk(t *testing.T) { store := testkit.CreateMockStore(t) @@ -431,25 +110,6 @@ func TestCTEExecError(t *testing.T) { } } -// https://github.com/pingcap/tidb/issues/33965. -func TestCTEsInView(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - tk.MustExec("create database if not exists test1;") - tk.MustExec("create table test.t (a int);") - tk.MustExec("create table test1.t (a int);") - tk.MustExec("insert into test.t values (1);") - tk.MustExec("insert into test1.t values (2);") - - tk.MustExec("use test;") - tk.MustExec("create definer='root'@'localhost' view test.v as with tt as (select * from t) select * from tt;") - tk.MustQuery("select * from test.v;").Check(testkit.Rows("1")) - tk.MustExec("use test1;") - tk.MustQuery("select * from test.v;").Check(testkit.Rows("1")) -} - func TestCTEPanic(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) diff --git a/pkg/executor/ddl.go b/pkg/executor/ddl.go index 9cf577ea31688..3a447d1be551c 100644 --- a/pkg/executor/ddl.go +++ b/pkg/executor/ddl.go @@ -393,10 +393,11 @@ func (e *DDLExec) executeRecoverTable(s *ast.RecoverTableStmt) error { var job *model.Job var err error var tblInfo *model.TableInfo - if s.JobID != 0 { - job, tblInfo, err = e.getRecoverTableByJobID(s, dom) - } else { + // Let check table first. Related isssue #46296. 
+ if s.Table != nil { job, tblInfo, err = e.getRecoverTableByTableName(s.Table) + } else { + job, tblInfo, err = e.getRecoverTableByJobID(s, dom) } if err != nil { return err diff --git a/pkg/executor/distsql.go b/pkg/executor/distsql.go index 9f481556b128f..7d4cfc9c3ab15 100644 --- a/pkg/executor/distsql.go +++ b/pkg/executor/distsql.go @@ -980,7 +980,7 @@ func (w *indexWorker) fetchHandles(ctx context.Context, results []distsql.Select defer func() { if r := recover(); r != nil { logutil.Logger(ctx).Error("indexWorker in IndexLookupExecutor panicked", zap.Any("recover", r), zap.Stack("stack")) - err4Panic := errors.Errorf("%v", r) + err4Panic := util.GetRecoverError(r) w.syncErr(err4Panic) if err != nil { err = errors.Trace(err4Panic) @@ -1168,7 +1168,8 @@ func (w *tableWorker) pickAndExecTask(ctx context.Context) { defer func() { if r := recover(); r != nil { logutil.Logger(ctx).Error("tableWorker in IndexLookUpExecutor panicked", zap.Any("recover", r), zap.Stack("stack")) - task.doneCh <- errors.Errorf("%v", r) + err := util.GetRecoverError(r) + task.doneCh <- err } }() for { diff --git a/pkg/executor/distsql_test.go b/pkg/executor/distsql_test.go index 04490e46ea3ef..b50d15e8ea060 100644 --- a/pkg/executor/distsql_test.go +++ b/pkg/executor/distsql_test.go @@ -103,7 +103,7 @@ func TestCopClientSend(t *testing.T) { // Split one region. key := tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(500)) - region, _, _ := cluster.GetRegionByKey(key) + region, _, _, _ := cluster.GetRegionByKey(key) peerID := cluster.AllocID() cluster.Split(region.GetId(), cluster.AllocID(), key, []uint64{peerID}, peerID) diff --git a/pkg/executor/executor.go b/pkg/executor/executor.go index 54406e2b1495f..686950e01207a 100644 --- a/pkg/executor/executor.go +++ b/pkg/executor/executor.go @@ -1211,7 +1211,7 @@ func newLockCtx(sctx sessionctx.Context, lockWaitTime int64, numKeys int) (*tikv return nil, err } lockCtx := tikvstore.NewLockCtx(forUpdateTS, lockWaitTime, seVars.StmtCtx.GetLockWaitStartTime()) - lockCtx.Killed = &seVars.Killed + lockCtx.Killed = &seVars.SQLKiller.Signal lockCtx.PessimisticLockWaited = &seVars.StmtCtx.PessimisticLockWaited lockCtx.LockKeysDuration = &seVars.StmtCtx.LockKeysDuration lockCtx.LockKeysCount = &seVars.StmtCtx.LockKeysCount @@ -1844,7 +1844,7 @@ func (e *UnionExec) resultPuller(ctx context.Context, workerID int) { defer func() { if r := recover(); r != nil { logutil.Logger(ctx).Error("resultPuller panicked", zap.Any("recover", r), zap.Stack("stack")) - result.err = errors.Errorf("%v", r) + result.err = util.GetRecoverError(r) e.resultPool <- result e.stopFetchData.Store(true) } @@ -1993,6 +1993,10 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { vars.DiskTracker.Detach() vars.DiskTracker.ResetMaxConsumed() vars.MemTracker.SessionID.Store(vars.ConnectionID) + vars.MemTracker.Killer = &vars.SQLKiller + vars.DiskTracker.Killer = &vars.SQLKiller + vars.SQLKiller.Reset() + vars.SQLKiller.ConnID = vars.ConnectionID vars.StmtCtx.TableStats = make(map[int64]interface{}) isAnalyze := false @@ -2015,7 +2019,7 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { logOnQueryExceedMemQuota := domain.GetDomain(ctx).ExpensiveQueryHandle().LogOnQueryExceedMemQuota switch variable.OOMAction.Load() { case variable.OOMActionCancel: - action := &memory.PanicOnExceed{ConnID: vars.ConnectionID} + action := &memory.PanicOnExceed{ConnID: vars.ConnectionID, Killer: vars.MemTracker.Killer} action.SetLogHook(logOnQueryExceedMemQuota) 
vars.MemTracker.SetActionOnExceed(action) case variable.OOMActionLog: @@ -2098,16 +2102,22 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.IgnoreNoPartition = stmt.IgnoreErr sc.ErrAutoincReadFailedAsWarning = stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || + !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || + vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.CreateTableStmt, *ast.AlterTableStmt: sc.InCreateOrAlterStmt = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || sc.AllowInvalidDate - sc.NoZeroDate = vars.SQLMode.HasNoZeroDateMode() - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.StrictSQLMode || + vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroDateErr(!vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode)) + case *ast.LoadDataStmt: sc.InLoadDataStmt = true // return warning instead of error when load data meet no partition for value @@ -2122,9 +2132,10 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { sc.OverflowAsWarning = true // Return warning for truncate error in selection. - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) if opts := stmt.SelectStmtOpts; opts != nil { sc.Priority = opts.Priority sc.NotFillCache = !opts.SQLCache @@ -2133,40 +2144,45 @@ func ResetContextOfStmt(ctx sessionctx.Context, s ast.StmtNode) (err error) { case *ast.SetOprStmt: sc.InSelectStmt = true sc.OverflowAsWarning = true - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.ShowStmt: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). 
+ WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) if stmt.Tp == ast.ShowWarnings || stmt.Tp == ast.ShowErrors || stmt.Tp == ast.ShowSessionStates { sc.InShowWarning = true sc.SetWarnings(vars.StmtCtx.GetWarnings()) } case *ast.SplitRegionStmt: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(false)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(false). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) case *ast.SetSessionStatesStmt: sc.InSetSessionStatesStmt = true - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) default: - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() + sc.SetTypeFlags(sc.TypeFlags(). + WithIgnoreTruncateErr(true). + WithIgnoreZeroInDate(true). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode())) } sc.SetTypeFlags(sc.TypeFlags(). WithSkipUTF8Check(vars.SkipUTF8Check). WithSkipSACIICheck(vars.SkipASCIICheck). WithSkipUTF8MB4Check(!globalConfig.Instance.CheckMb4ValueInUTF8.Load()). - // WithClipNegativeToZero indicates whether values less than 0 should be clipped to 0 for unsigned integer types. + // WithAllowNegativeToUnsigned(false) indicates that values less than 0 should be clipped to 0 for unsigned integer types. // This is the case for `insert`, `update`, `alter table`, `create table` and `load data infile` statements, when not in strict SQL mode. // see https://dev.mysql.com/doc/refman/5.7/en/out-of-range-and-overflow.html - WithClipNegativeToZero(sc.InInsertStmt || sc.InLoadDataStmt || sc.InUpdateStmt || sc.InCreateOrAlterStmt), + WithAllowNegativeToUnsigned(!sc.InInsertStmt && !sc.InLoadDataStmt && !sc.InUpdateStmt && !sc.InCreateOrAlterStmt), ) vars.PlanCacheParams.Reset() @@ -2218,11 +2234,13 @@ func ResetUpdateStmtCtx(sc *stmtctx.StatementContext, stmt *ast.UpdateStmt, vars sc.DupKeyAsWarning = stmt.IgnoreErr sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority sc.IgnoreNoPartition = stmt.IgnoreErr - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) } // ResetDeleteStmtCtx resets statement context for DeleteStmt.
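The `ResetContextOfStmt` and `ResetUpdateStmtCtx` hunks above fold the old mutable `StatementContext` booleans (`AllowInvalidDate`, `IgnoreZeroInDate`, `NoZeroDate`) into the chained `TypeFlags` builder. A sketch of how such a copy-on-write flag type can work; the bit layout and the `with` helper are assumptions, only the `With*` method shape comes from the diff:

```go
package main

import "fmt"

// TypeFlags is an immutable bitmask of type-conversion behaviors.
type TypeFlags uint32

const (
	flagTruncateAsWarning TypeFlags = 1 << iota
	flagIgnoreZeroInDate
	flagIgnoreInvalidDateErr
)

// with returns a copy of f with the given bit set or cleared.
func (f TypeFlags) with(bit TypeFlags, on bool) TypeFlags {
	if on {
		return f | bit
	}
	return f &^ bit
}

// Each With* method returns a new value instead of mutating in place, so a
// half-built chain can never be observed through the StatementContext.
func (f TypeFlags) WithTruncateAsWarning(on bool) TypeFlags { return f.with(flagTruncateAsWarning, on) }
func (f TypeFlags) WithIgnoreZeroInDate(on bool) TypeFlags  { return f.with(flagIgnoreZeroInDate, on) }
func (f TypeFlags) WithIgnoreInvalidDateErr(on bool) TypeFlags {
	return f.with(flagIgnoreInvalidDateErr, on)
}

func main() {
	base := TypeFlags(0)
	strictUpdate := base.
		WithTruncateAsWarning(false).
		WithIgnoreZeroInDate(false).
		WithIgnoreInvalidDateErr(false)
	lenientSelect := base.
		WithTruncateAsWarning(true).
		WithIgnoreZeroInDate(true)
	// base is untouched by either chain.
	fmt.Printf("strict=%03b lenient=%03b base=%03b\n", strictUpdate, lenientSelect, base)
}
```

Because every `With*` call returns a value, one `sc.SetTypeFlags(...)` publishes the whole statement's conversion policy at once, replacing the scattered field writes deleted above.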
@@ -2231,10 +2249,12 @@ func ResetDeleteStmtCtx(sc *stmtctx.StatementContext, stmt *ast.DeleteStmt, vars sc.DupKeyAsWarning = stmt.IgnoreErr sc.BadNullAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr sc.DividedByZeroAsWarning = !vars.StrictSQLMode || stmt.IgnoreErr - sc.AllowInvalidDate = vars.SQLMode.HasAllowInvalidDatesMode() - sc.IgnoreZeroInDate = !vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || !vars.StrictSQLMode || stmt.IgnoreErr || sc.AllowInvalidDate sc.Priority = stmt.Priority - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr)) + sc.SetTypeFlags(sc.TypeFlags(). + WithTruncateAsWarning(!vars.StrictSQLMode || stmt.IgnoreErr). + WithIgnoreInvalidDateErr(vars.SQLMode.HasAllowInvalidDatesMode()). + WithIgnoreZeroInDate(!vars.SQLMode.HasNoZeroInDateMode() || !vars.SQLMode.HasNoZeroDateMode() || + !vars.StrictSQLMode || stmt.IgnoreErr || vars.SQLMode.HasAllowInvalidDatesMode())) } func setOptionForTopSQL(sc *stmtctx.StatementContext, snapshot kv.Snapshot) { diff --git a/pkg/executor/executor_failpoint_test.go b/pkg/executor/executor_failpoint_test.go index 01b4ff86e1469..15d52ccec3dda 100644 --- a/pkg/executor/executor_failpoint_test.go +++ b/pkg/executor/executor_failpoint_test.go @@ -35,6 +35,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/deadlockhistory" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/stretchr/testify/require" ) @@ -236,7 +237,7 @@ func TestTSOFail(t *testing.T) { } func TestKillTableReader(t *testing.T) { - var retry = "github.com/tikv/client-go/v2/locate/mockRetrySendReqToRegion" + var retry = "tikvclient/mockRetrySendReqToRegion" defer func() { require.NoError(t, failpoint.Disable(retry)) }() @@ -248,18 +249,18 @@ func TestKillTableReader(t *testing.T) { tk.MustExec("create table t (a int)") tk.MustExec("insert into t values (1),(2),(3)") tk.MustExec("set @@tidb_distsql_scan_concurrency=1") - atomic.StoreUint32(&tk.Session().GetSessionVars().Killed, 0) + tk.Session().GetSessionVars().SQLKiller.Reset() require.NoError(t, failpoint.Enable(retry, `return(true)`)) wg := &sync.WaitGroup{} wg.Add(1) go func() { defer wg.Done() - time.Sleep(1 * time.Second) - err := tk.QueryToErr("select * from t") - require.Error(t, err) - require.Equal(t, int(exeerrors.ErrQueryInterrupted.Code()), int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code)) + time.Sleep(300 * time.Millisecond) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) }() - atomic.StoreUint32(&tk.Session().GetSessionVars().Killed, 1) + err := tk.QueryToErr("select * from t") + require.Error(t, err) + require.Equal(t, int(exeerrors.ErrQueryInterrupted.Code()), int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code)) wg.Wait() } diff --git a/pkg/executor/executor_pkg_test.go b/pkg/executor/executor_pkg_test.go index 7bdf60a0310eb..45a98caba9258 100644 --- a/pkg/executor/executor_pkg_test.go +++ b/pkg/executor/executor_pkg_test.go @@ -75,6 +75,63 @@ func TestBuildKvRangesForIndexJoinWithoutCwc(t *testing.T) { } } +func TestBuildKvRangesForIndexJoinWithoutCwcAndWithMemoryTracker(t *testing.T) { + indexRanges := make([]*ranger.Range, 0, 6) + indexRanges = append(indexRanges, generateIndexRange(1, 1, 1, 1, 1)) + indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 1)) + indexRanges = append(indexRanges, generateIndexRange(1, 1, 2, 1, 2)) + indexRanges = append(indexRanges, 
generateIndexRange(1, 1, 3, 1, 1)) + indexRanges = append(indexRanges, generateIndexRange(2, 1, 1, 1, 1)) + indexRanges = append(indexRanges, generateIndexRange(2, 1, 2, 1, 1)) + + bytesConsumed1 := int64(0) + { + joinKeyRows := make([]*indexJoinLookUpContent, 0, 10) + for i := int64(0); i < 10; i++ { + joinKeyRows = append(joinKeyRows, &indexJoinLookUpContent{keys: generateDatumSlice(1, i)}) + } + + keyOff2IdxOff := []int{1, 3} + ctx := mock.NewContext() + memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1) + kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil) + require.NoError(t, err) + // Check the kvRanges is in order. + for i, kvRange := range kvRanges { + require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0) + if i > 0 { + require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0) + } + } + bytesConsumed1 = memTracker.BytesConsumed() + } + + bytesConsumed2 := int64(0) + { + joinKeyRows := make([]*indexJoinLookUpContent, 0, 20) + for i := int64(0); i < 20; i++ { + joinKeyRows = append(joinKeyRows, &indexJoinLookUpContent{keys: generateDatumSlice(1, i)}) + } + + keyOff2IdxOff := []int{1, 3} + ctx := mock.NewContext() + memTracker := memory.NewTracker(memory.LabelForIndexWorker, -1) + kvRanges, err := buildKvRangesForIndexJoin(ctx, 0, 0, joinKeyRows, indexRanges, keyOff2IdxOff, nil, memTracker, nil) + require.NoError(t, err) + // Check the kvRanges is in order. + for i, kvRange := range kvRanges { + require.True(t, kvRange.StartKey.Cmp(kvRange.EndKey) < 0) + if i > 0 { + require.True(t, kvRange.StartKey.Cmp(kvRanges[i-1].EndKey) >= 0) + } + } + bytesConsumed2 = memTracker.BytesConsumed() + } + + require.Equal(t, 2*bytesConsumed1, bytesConsumed2) + require.Equal(t, int64(20760), bytesConsumed1) +} + func generateIndexRange(vals ...int64) *ranger.Range { lowDatums := generateDatumSlice(vals...) 
highDatums := make([]types.Datum, len(vals)) diff --git a/pkg/executor/executor_txn_test.go b/pkg/executor/executor_txn_test.go index d20da73198b8e..bf63b8d072f8c 100644 --- a/pkg/executor/executor_txn_test.go +++ b/pkg/executor/executor_txn_test.go @@ -462,36 +462,6 @@ func TestTxnSavepoint1(t *testing.T) { } } -func TestRollbackToSavepoint(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(id int, a int, unique index idx(id))") - - tk.MustExec("begin pessimistic") - tk.MustExec("insert into t values (1,1)") - tk.MustExec("savepoint s1") - tk.MustExec("insert into t values (2,2)") - tk.MustExec("rollback to s1") - tk.MustExec("insert into t values (2,2)") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1", "2 2")) - tk.MustExec("rollback to s1") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1")) - tk.MustExec("commit") - tk.MustQuery("select * from t").Check(testkit.Rows("1 1")) - - tk.MustExec("delete from t") - tk.MustExec("insert into t values (1,1)") - tk.MustExec("begin pessimistic") - tk.MustExec("delete from t where id = 1") - tk.MustExec("savepoint s1") - tk.MustExec("insert into t values (1,2)") - tk.MustExec("rollback to s1") - tk.MustQuery("select * from t").Check(testkit.Rows()) - tk.MustExec("commit") - tk.MustQuery("select * from t").Check(testkit.Rows()) -} - func TestRollbackToSavepointReleasePessimisticLock(t *testing.T) { store := testkit.CreateMockStore(t) tk1 := testkit.NewTestKit(t, store) @@ -660,67 +630,6 @@ func TestSavepointInBigTxn(t *testing.T) { tk1.MustQuery("select * from t order by id").Check(testkit.Rows("0 0", "1 1")) } -func TestSavepointRandTestIssue0(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a));") - tk.MustExec("begin pessimistic") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert ignore into t values ( 'B' ),( 'C' );") - err := tk.ExecToErr("update t set a = 'C' where a = 'B';") - require.Error(t, err) - tk.MustExec("select * from t where a = 'B' for update;") - tk.MustExec("rollback to sp0;") - tk.MustExec("delete from t where a = 'B' ;") -} - -func TestSavepointWithTemporaryTable(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // Test for local temporary table. - txnModes := []string{"optimistic", "pessimistic", ""} - for _, txnMode := range txnModes { - tk.MustExec(fmt.Sprintf("set session tidb_txn_mode='%v';", txnMode)) - tk.MustExec("drop table if exists tmp1") - tk.MustExec("create temporary table tmp1 (id int primary key auto_increment, u int unique, v int)") - tk.MustExec("insert into tmp1 values(1, 11, 101)") - tk.MustExec("begin") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert into tmp1 values(2, 22, 202)") - tk.MustExec("savepoint sp1;") - tk.MustExec("insert into tmp1 values(3, 33, 303)") - tk.MustExec("rollback to sp1;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("1 11 101", "2 22 202")) - tk.MustExec("commit") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("1 11 101", "2 22 202")) - } - - // Test for global temporary table. 
- for _, txnMode := range txnModes { - tk.MustExec(fmt.Sprintf("set session tidb_txn_mode='%v';", txnMode)) - tk.MustExec("drop table if exists tmp1") - tk.MustExec("create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows") - tk.MustExec("begin") - tk.MustExec("savepoint sp0;") - tk.MustExec("insert into tmp1 values(2, 22, 202)") - tk.MustExec("savepoint sp1;") - tk.MustExec("insert into tmp1 values(3, 33, 303)") - tk.MustExec("savepoint sp2;") - tk.MustExec("insert into tmp1 values(4, 44, 404)") - tk.MustExec("rollback to sp2;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("2 22 202", "3 33 303")) - tk.MustExec("rollback to sp1;") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows("2 22 202")) - tk.MustExec("commit") - tk.MustQuery("select * from tmp1 order by id").Check(testkit.Rows()) - } -} - func TestSavepointWithCacheTable(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/pkg/executor/explain_test.go b/pkg/executor/explain_test.go index 3c720f836f4e0..cb6048953f701 100644 --- a/pkg/executor/explain_test.go +++ b/pkg/executor/explain_test.go @@ -25,118 +25,12 @@ import ( "time" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/parser/auth" plannercore "github.com/pingcap/tidb/pkg/planner/core" - "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" "github.com/stretchr/testify/require" ) -func TestExplainPrivileges(t *testing.T) { - store := testkit.CreateMockStore(t) - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk := testkit.NewTestKit(t, store) - tk.SetSession(se) - - tk.MustExec("create database explaindatabase") - tk.MustExec("use explaindatabase") - tk.MustExec("create table t (id int)") - tk.MustExec("create view v as select * from t") - tk.MustExec(`create user 'explain'@'%'`) - - tk1 := testkit.NewTestKit(t, store) - se, err = session.CreateSession4Test(store) - require.NoError(t, err) - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "explain", Hostname: "%"}, nil, nil, nil)) - tk1.SetSession(se) - - tk.MustExec(`grant select on explaindatabase.v to 'explain'@'%'`) - tk1.MustQuery("show databases").Check(testkit.Rows("INFORMATION_SCHEMA", "explaindatabase")) - - tk1.MustExec("use explaindatabase") - tk1.MustQuery("select * from v") - err = tk1.ExecToErr("explain format = 'brief' select * from v") - require.Equal(t, plannercore.ErrViewNoExplain.Error(), err.Error()) - - tk.MustExec(`grant show view on explaindatabase.v to 'explain'@'%'`) - tk1.MustQuery("explain format = 'brief' select * from v") - - tk.MustExec(`revoke select on explaindatabase.v from 'explain'@'%'`) - - err = tk1.ExecToErr("explain format = 'brief' select * from v") - require.Equal(t, plannercore.ErrTableaccessDenied.GenWithStackByArgs("SELECT", "explain", "%", "v").Error(), err.Error()) - - // https://github.com/pingcap/tidb/issues/34326 - tk.MustExec("create table t1 (i int)") - tk.MustExec("create table t2 (j int)") - tk.MustExec("create table t3 (k int, secret int)") - - tk.MustExec("create view v1 as select * from t1") - tk.MustExec("create view v2 as select * from v1, t2") - tk.MustExec("create view v3 as select k from t3") - - tk.MustExec("grant select, show view on explaindatabase.v2 to 'explain'@'%'") - tk.MustExec("grant show view on 
explaindatabase.v1 to 'explain'@'%'") - tk.MustExec("grant select, show view on explaindatabase.t3 to 'explain'@'%'") - tk.MustExec("grant select, show view on explaindatabase.v3 to 'explain'@'%'") - - tk1.MustGetErrMsg("explain select * from v1", "[planner:1142]SELECT command denied to user 'explain'@'%' for table 'v1'") - tk1.MustGetErrCode("explain select * from v2", errno.ErrViewNoExplain) - tk1.MustQuery("explain select * from t3") - tk1.MustQuery("explain select * from v3") -} - -func TestExplainCartesianJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (v int)") - - cases := []struct { - sql string - isCartesianJoin bool - }{ - {"explain format = 'brief' select * from t t1, t t2", true}, - {"explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v > t1.v)", true}, - {"explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v in (t1.v+1, t1.v+2))", true}, - {"explain format = 'brief' select * from t t1, t t2 where t1.v = t2.v", false}, - } - for _, ca := range cases { - rows := tk.MustQuery(ca.sql).Rows() - ok := false - for _, row := range rows { - str := fmt.Sprintf("%v", row) - if strings.Contains(str, "CARTESIAN") { - ok = true - } - } - - require.Equal(t, ca.isCartesianJoin, ok) - } -} - -func TestExplainWrite(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int)") - tk.MustQuery("explain analyze insert into t select 1") - tk.MustQuery("select * from t").Check(testkit.Rows("1")) - tk.MustQuery("explain analyze update t set a=2 where a=1") - tk.MustQuery("select * from t").Check(testkit.Rows("2")) - tk.MustQuery("explain format = 'brief' insert into t select 1") - tk.MustQuery("select * from t").Check(testkit.Rows("2")) - tk.MustQuery("explain analyze insert into t select 1") - tk.MustQuery("explain analyze replace into t values (3)") - tk.MustQuery("select * from t order by a").Check(testkit.Rows("1", "2", "3")) -} - func TestExplainAnalyzeMemory(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -413,43 +307,6 @@ func TestExplainAnalyzeCTEMemoryAndDiskInfo(t *testing.T) { require.NotEqual(t, "N/A", rows[4][8].(string)) } -func TestExplainStatementsSummary(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustQuery("desc select * from information_schema.statements_summary").Check(testkit.Rows( - `MemTableScan_4 10000.00 root table:STATEMENTS_SUMMARY `)) - tk.MustQuery("desc select * from information_schema.statements_summary where digest is null").Check(testkit.RowsWithSep("|", - `Selection_5|8000.00|root| isnull(Column#5)`, `└─MemTableScan_6|10000.00|root|table:STATEMENTS_SUMMARY|`)) - tk.MustQuery("desc select * from information_schema.statements_summary where digest = 'abcdefg'").Check(testkit.RowsWithSep(" ", - `MemTableScan_5 10000.00 root table:STATEMENTS_SUMMARY digests: ["abcdefg"]`)) - tk.MustQuery("desc select * from information_schema.statements_summary where digest in ('a','b','c')").Check(testkit.RowsWithSep(" ", - `MemTableScan_5 10000.00 root table:STATEMENTS_SUMMARY digests: ["a","b","c"]`)) -} - -func TestFix29401(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - 
tk.MustExec("use test") - tk.MustExec("drop table if exists tt123;") - tk.MustExec(`CREATE TABLE tt123 ( - id int(11) NOT NULL, - a bigint(20) DEFAULT NULL, - b char(20) DEFAULT NULL, - c datetime DEFAULT NULL, - d double DEFAULT NULL, - e json DEFAULT NULL, - f decimal(40,6) DEFAULT NULL, - PRIMARY KEY (id) /*T![clustered_index] CLUSTERED */, - KEY a (a), - KEY b (b), - KEY c (c), - KEY d (d), - KEY f (f) -) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) - tk.MustExec(" explain select /*+ inl_hash_join(t1) */ * from tt123 t1 join tt123 t2 on t1.b=t2.e;") -} - func TestIssue35296AndIssue43024(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -507,18 +364,6 @@ func TestIssue35911(t *testing.T) { require.EqualValues(t, 5, concurrency) } -func TestIssue35105(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int primary key)") - tk.MustExec("insert into t values (2)") - tk.MustExec("set @@tidb_constraint_check_in_place=1") - require.Error(t, tk.ExecToErr("explain analyze insert into t values (1), (2), (3)")) - tk.MustQuery("select * from t").Check(testkit.Rows("2")) -} - func flatJSONPlan(j *plannercore.ExplainInfoForEncode) (res []*plannercore.ExplainInfoForEncode) { if j == nil { return @@ -649,56 +494,3 @@ func TestExplainFormatInCtx(t *testing.T) { } } } - -func TestExplainFormatPlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("set @@session.tidb_enable_non_prepared_plan_cache = 1") - tk.MustExec("select * from t limit 1") - tk.MustExec("select * from t limit 1") - - // miss - tk.MustExec("explain format = 'plan_cache' select * from (select * from t) t1 limit 1") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported")) - tk.MustExec("explain format = 'plan_cache' select * from (select * from t) t1 limit 1") - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - - tk.MustExec("explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported")) - tk.MustExec("explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1") - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - - // hit - tk.MustExec("explain format = 'plan_cache' select * from t") - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk.MustExec("explain format = 'plan_cache' select * from t") - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - tk.MustExec("explain analyze format = 'plan_cache' select * from t") - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk.MustExec("explain analyze format = 'plan_cache' select * from t") - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - // will not use plan cache - explainFormats := []string{ - types.ExplainFormatBrief, - types.ExplainFormatDOT, - types.ExplainFormatHint, - types.ExplainFormatROW, - types.ExplainFormatVerbose, - types.ExplainFormatTraditional, - types.ExplainFormatBinary, - types.ExplainFormatTiDBJSON, - types.ExplainFormatCostTrace, - } - - 
tk.MustExec("explain select * from t") - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - for _, format := range explainFormats { - tk.MustExec(fmt.Sprintf("explain format = '%v' select * from t", format)) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) - } -} diff --git a/pkg/executor/explainfor_test.go b/pkg/executor/explainfor_test.go index a98697852ac2f..18e8e4e721ed1 100644 --- a/pkg/executor/explainfor_test.go +++ b/pkg/executor/explainfor_test.go @@ -167,63 +167,6 @@ func TestIssue11124(t *testing.T) { } } -func TestExplainMemTablePredicate(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("desc select * from METRICS_SCHEMA.tidb_query_duration where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:tidb_query_duration PromQL:histogram_quantile(0.9, sum(rate(tidb_server_handle_query_duration_seconds_bucket{}[60s])) by (le,sql_type,instance)), start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s")) - tk.MustQuery("desc select * from METRICS_SCHEMA.up where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:up PromQL:up{}, start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s")) - tk.MustQuery("desc select * from information_schema.cluster_log where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13")) - tk.MustQuery("desc select * from information_schema.cluster_log where level in ('warn','error') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, log_levels:["error","warn"]`)) - tk.MustQuery("desc select * from information_schema.cluster_log where type in ('high_cpu_1','high_memory_1') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, node_types:["high_cpu_1","high_memory_1"]`)) - tk.MustQuery("desc select * from information_schema.slow_query").Check(testkit.Rows( - "MemTableScan_4 10000.00 root table:SLOW_QUERY only search in the current 'tidb-slow.log' file")) - tk.MustQuery("desc select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000")) - tk.MustExec("set @@time_zone = '+00:00';") - tk.MustQuery("desc select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000")) -} - -func TestExplainClusterTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("desc select * from information_schema.cluster_config where type in ('tikv', 'tidb')").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG node_types:["tidb","tikv"]`)) - tk.MustQuery("desc select * from information_schema.cluster_config 
where instance='192.168.1.7:2379'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG instances:["192.168.1.7:2379"]`)) - tk.MustQuery("desc select * from information_schema.cluster_config where type='tidb' and instance='192.168.1.7:2379'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:CLUSTER_CONFIG node_types:["tidb"], instances:["192.168.1.7:2379"]`)) -} - -func TestInspectionResultTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("desc select * from information_schema.inspection_result where rule = 'ddl' and rule = 'config'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RESULT skip_inspection:true`)) - tk.MustQuery("desc select * from information_schema.inspection_result where rule in ('ddl', 'config')").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:[]`)) - tk.MustQuery("desc select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool')").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:[], items:["ddl.lease","raftstore.threadpool"]`)) - tk.MustQuery("desc select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool') and rule in ('ddl', 'config')").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:["ddl.lease","raftstore.threadpool"]`)) -} - -func TestInspectionRuleTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("desc select * from information_schema.inspection_rules where type='inspection'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RULES node_types:["inspection"]`)) - tk.MustQuery("desc select * from information_schema.inspection_rules where type='inspection' or type='summary'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RULES node_types:["inspection","summary"]`)) - tk.MustQuery("desc select * from information_schema.inspection_rules where type='inspection' and type='summary'").Check(testkit.Rows( - `MemTableScan_5 10000.00 root table:INSPECTION_RULES skip_request: true`)) -} - func TestExplainForConnPlanCache(t *testing.T) { t.Skip("unstable") @@ -286,28 +229,6 @@ func TestExplainForConnPlanCache(t *testing.T) { wg.Wait() } -func TestSavedPlanPanicPlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, c int generated always as (a+b) stored)") - tk.MustExec("insert into t(a,b) values(1,1)") - tk.MustExec("begin") - tk.MustExec("update t set b = 2 where a = 1") - tk.MustExec("prepare stmt from 'select b from t where a > ?'") - tk.MustExec("set @p = 0") - tk.MustQuery("execute stmt using @p").Check(testkit.Rows( - "2", - )) - tk.MustExec("set @p = 1") - tk.MustQuery("execute stmt using @p").Check(testkit.Rows()) - err := tk.ExecToErr("insert into t(a,b,c) values(3,3,3)") - require.EqualError(t, err, "[planner:3105]The value specified for generated column 'c' in table 't' is not allowed.") -} - func TestExplainDotForExplainPlan(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -347,95 +268,6 @@ func TestExplainDotForQuery(t *testing.T) { } } -func TestExplainTableStorage(t 
*testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS schema:[\"information_schema\"]")) - tk.MustQuery("desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_NAME = 'schemata'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS table:[\"schemata\"]")) - tk.MustQuery("desc select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema' and TABLE_NAME = 'schemata'").Check(testkit.Rows( - "MemTableScan_5 10000.00 root table:TABLE_STORAGE_STATS schema:[\"information_schema\"], table:[\"schemata\"]")) -} - -func TestInspectionSummaryTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl'").Check(testkit.Rows( - `Selection_5 8000.00 root eq(Column#1, "ddl")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where 'ddl'=rule or rule='config'").Check(testkit.Rows( - `Selection_5 8000.00 root or(eq("ddl", Column#1), eq(Column#1, "config"))`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where 'ddl'=rule or rule='config' or rule='slow_query'").Check(testkit.Rows( - `Selection_5 8000.00 root or(eq("ddl", Column#1), or(eq(Column#1, "config"), eq(Column#1, "slow_query")))`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl","slow_query"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where (rule='config' or rule='slow_query') and (metrics_name='metric_name3' or metrics_name='metric_name1')").Check(testkit.Rows( - `Selection_5 8000.00 root or(eq(Column#1, "config"), eq(Column#1, "slow_query")), or(eq(Column#3, "metric_name3"), eq(Column#3, "metric_name1"))`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config","slow_query"], metric_names:["metric_name1","metric_name3"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query')").Check(testkit.Rows( - `Selection_5 8000.00 root in(Column#1, "ddl", "slow_query")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name='metric_name1'").Check(testkit.Rows( - `Selection_5 8000.00 root eq(Column#3, "metric_name1"), in(Column#1, "ddl", "slow_query")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name in ('metric_name1', 'metric_name2')").Check(testkit.Rows( - `Selection_5 8000.00 root in(Column#1, "ddl", "slow_query"), in(Column#3, "metric_name1", "metric_name2")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1","metric_name2"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl' and metrics_name in 
('metric_name1', 'metric_name2')").Check(testkit.Rows( - `Selection_5 8000.00 root eq(Column#1, "ddl"), in(Column#3, "metric_name1", "metric_name2")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name1","metric_name2"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule='ddl' and metrics_name='metric_NAME3'").Check(testkit.Rows( - `Selection_5 8000.00 root eq(Column#1, "ddl"), eq(Column#3, "metric_NAME3")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name3"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where rule in ('ddl', 'config') and rule in ('slow_query', 'config')").Check(testkit.Rows( - `Selection_5 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config"]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and rule in ('ddl', 'config') and rule in ('slow_query', 'config') and quantile in (0.80, 0.90)").Check(testkit.Rows( - `Selection_5 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name4")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY rules:["config"], metric_names:["metric_name4"], quantiles:[0.800000,0.900000]`, - )) - tk.MustQuery("desc select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name1') and metrics_name in ('metric_name1', 'metric_name3')").Check(testkit.Rows( - `Selection_5 8000.00 root in(Column#3, "metric_name1", "metric_name3"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name1"), in(Column#3, "metric_name5", "metric_name4")`, - `└─MemTableScan_6 10000.00 root table:INSPECTION_SUMMARY skip_inspection: true`, - )) -} - -func TestExplainTiFlashSystemTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tiflashInstance := "192.168.1.7:3930" - database := "test" - table := "t" - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '%s'", tiflashInstance)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_TABLES tiflash_instances:[\"%s\"]", tiflashInstance))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '%s'", tiflashInstance)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_SEGMENTS tiflash_instances:[\"%s\"]", tiflashInstance))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_TABLES where TIDB_DATABASE = '%s'", database)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_TABLES tidb_databases:[\"%s\"]", database))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_SEGMENTS where TIDB_DATABASE = '%s'", database)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_SEGMENTS tidb_databases:[\"%s\"]", database))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_TABLES where TIDB_TABLE = '%s'", 
table)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_TABLES tidb_tables:[\"%s\"]", table))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_SEGMENTS where TIDB_TABLE = '%s'", table)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_SEGMENTS tidb_tables:[\"%s\"]", table))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '%s' and TIDB_DATABASE = '%s' and TIDB_TABLE = '%s'", tiflashInstance, database, table)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_TABLES tiflash_instances:[\"%s\"], tidb_databases:[\"%s\"], tidb_tables:[\"%s\"]", tiflashInstance, database, table))) - tk.MustQuery(fmt.Sprintf("desc select * from information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '%s' and TIDB_DATABASE = '%s' and TIDB_TABLE = '%s'", tiflashInstance, database, table)).Check(testkit.Rows( - fmt.Sprintf("MemTableScan_5 10000.00 root table:TIFLASH_SEGMENTS tiflash_instances:[\"%s\"], tidb_databases:[\"%s\"], tidb_tables:[\"%s\"]", tiflashInstance, database, table))) -} - func TestPointGetUserVarPlanCache(t *testing.T) { store := testkit.CreateMockStore(t) tmp := testkit.NewTestKit(t, store) @@ -892,73 +724,6 @@ func TestIndexMerge4PlanCache(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) } -func TestSetOperations4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists t1, t2;") - tk.MustExec("CREATE TABLE `t1` (a int);") - tk.MustExec("CREATE TABLE `t2` (a int);") - tk.MustExec("insert into t1 values(1), (2);") - tk.MustExec("insert into t2 values(1), (3);") - // test for UNION - tk.MustExec("prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';") - tk.MustExec("set @a=0, @b=1;") - tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt from 'select * from t1 where a > ? union all select * from t2 where a > ?;';") - tk.MustExec("set @a=0, @b=1;") - tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - // test for EXCEPT - tk.MustExec("prepare stmt from 'select * from t1 where a > ? 
except select * from t2 where a > ?;';") - tk.MustExec("set @a=0, @b=1;") - tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("2")) - tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - // test for INTERSECT - tk.MustExec("prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';") - tk.MustExec("set @a=0, @b=1;") - tk.MustQuery("execute stmt using @a, @a;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("execute stmt using @b, @a;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b, @b;").Sort().Check(testkit.Rows("2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @a, @b;").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - // test for UNION + INTERSECT - tk.MustExec("prepare stmt from 'select * from t1 union all select * from t1 intersect select * from t2;'") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "1", "2")) - - tk.MustExec("prepare stmt from '(select * from t1 union all select * from t1) intersect select * from t2;'") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1")) - - // test for order by and limit - tk.MustExec("prepare stmt from '(select * from t1 union all select * from t1 intersect select * from t2) order by a limit 2;'") - tk.MustQuery("execute stmt;").Sort().Check(testkit.Rows("1", "1")) -} - func TestSPM4PlanCache(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1013,375 +778,6 @@ func TestSPM4PlanCache(t *testing.T) { tk.MustExec("admin reload bindings;") } -func TestHint4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, index idx_a(a));") - - tk.MustExec("prepare stmt from 'select * from t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - -func TestIgnorePlanCacheWithPrepare(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, index idx_a(a));") - tk.MustExec("drop table if exists r;") - tk.MustExec("create table r(a int);") - - // test use_index - tk.MustExec("prepare stmt from 'select * from t;';") - tk.MustExec("create binding for select * from t using select /*+ use_index(t, idx_a) */ * 
from t;") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - - tk.MustExec("create binding for select * from t using select /*+ ignore_plan_cache() */ * from t;") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - - tk.MustExec("create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t;") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - - // test straight_join - tk.MustExec("prepare stmt_join from 'select * from t, r where r.a = t.a;';") - tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;") - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - - tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ ignore_plan_cache() */* from t, r where r.a = t.a;") - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - - tk.MustExec("create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;") - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) -} - -func TestSelectView4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists view_t;") - tk.MustExec("create table view_t (a int,b int)") - tk.MustExec("insert into view_t values(1,2)") - tk.MustExec("create definer='root'@'localhost' view view1 as select * from view_t") - tk.MustExec("create definer='root'@'localhost' view view2(c,d) as select * from view_t") - tk.MustExec("create definer='root'@'localhost' view view3(c,d) as select a,b from view_t") - tk.MustExec("create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;") - tk.MustExec("prepare stmt1 from 'select * from view1;'") - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt2 from 'select * from view2;'") - 
tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt3 from 'select * from view3;'") - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt4 from 'select * from view4;'") - tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("drop table view_t;") - tk.MustExec("create table view_t(c int,d int)") - err := tk.ExecToErr("execute stmt1;") - require.Equal(t, "[planner:1356]View 'test.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", err.Error()) - err = tk.ExecToErr("execute stmt2") - require.Equal(t, "[planner:1356]View 'test.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them", err.Error()) - err = tk.ExecToErr("execute stmt3") - require.Equal(t, core.ErrViewInvalid.GenWithStackByArgs("test", "view3").Error(), err.Error()) - tk.MustExec("drop table view_t;") - tk.MustExec("create table view_t(a int,b int,c int)") - tk.MustExec("insert into view_t values(1,2,3)") - - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("alter table view_t drop column a") - tk.MustExec("alter table view_t add column a int after b") - tk.MustExec("update view_t set a=1;") - - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt1;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt2;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt3;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - 
tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt4;").Check(testkit.Rows("1 2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("drop table view_t;") - tk.MustExec("drop view view1,view2,view3,view4;") - - tk.MustExec("set @@tidb_enable_window_function = 1") - defer func() { - tk.MustExec("set @@tidb_enable_window_function = 0") - }() - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into t values (1,1),(1,2),(2,1),(2,2)") - tk.MustExec("create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t") - tk.MustExec("prepare stmt from 'select * from v;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2")) - tk.MustQuery("execute stmt;").Check(testkit.Rows("1 1 1", "1 1 2", "2 1 2", "2 2 2")) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - tk.MustExec("drop view v;") -} - -func TestInvisibleIndex4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists t;") - tk.MustExec("CREATE TABLE t(c1 INT, index idx_c(c1));") - - tk.MustExec("prepare stmt from 'select * from t use index(idx_c) where c1 > 1;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("ALTER TABLE t ALTER INDEX idx_c INVISIBLE;") - err := tk.ExecToErr("select * from t use index(idx_c) where c1 > 1;") - require.Equal(t, "[planner:1176]Key 'idx_c' doesn't exist in table 't'", err.Error()) - - err = tk.ExecToErr("execute stmt;") - require.Equal(t, "[planner:1176]Key 'idx_c' doesn't exist in table 't'", err.Error()) -} - -func TestCTE4PlanCache(t *testing.T) { - // CTE can not be cached, because part of it will be treated as a subquery. - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from 'with recursive cte1 as (" + - "select ? c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < ?) " + - "select * from cte1;';") - tk.MustExec("set @a=5, @b=4, @c=2, @d=1;") - tk.MustQuery("execute stmt using @d, @a").Check(testkit.Rows("1", "2", "3", "4", "5")) - tk.MustQuery("execute stmt using @d, @b").Check(testkit.Rows("1", "2", "3", "4")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt using @c, @b").Check(testkit.Rows("2", "3", "4")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // Two seed parts. - tk.MustExec("prepare stmt from 'with recursive cte1 as (" + - "select 1 c1 " + - "union all " + - "select 2 c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < ?) 
" + - "select * from cte1 order by c1;';") - tk.MustExec("set @a=10, @b=2;") - tk.MustQuery("execute stmt using @a").Check(testkit.Rows("1", "2", "2", "3", "3", "4", "4", "5", "5", "6", "6", "7", "7", "8", "8", "9", "9", "10", "10")) - tk.MustQuery("execute stmt using @b").Check(testkit.Rows("1", "2", "2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - // Two recursive parts. - tk.MustExec("prepare stmt from 'with recursive cte1 as (" + - "select 1 c1 " + - "union all " + - "select 2 c1 " + - "union all " + - "select c1 + 1 c1 from cte1 where c1 < ? " + - "union all " + - "select c1 + ? c1 from cte1 where c1 < ?) " + - "select * from cte1 order by c1;';") - tk.MustExec("set @a=1, @b=2, @c=3, @d=4, @e=5;") - tk.MustQuery("execute stmt using @c, @b, @e;").Check(testkit.Rows("1", "2", "2", "3", "3", "3", "4", "4", "5", "5", "5", "6", "6")) - tk.MustQuery("execute stmt using @b, @a, @d;").Check(testkit.Rows("1", "2", "2", "2", "3", "3", "3", "4", "4", "4")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(a int);") - tk.MustExec("insert into t1 values(1);") - tk.MustExec("insert into t1 values(2);") - tk.MustExec("prepare stmt from 'SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*? AS b UNION ALL SELECT b+? FROM qn WHERE b=?) SELECT * FROM qn WHERE b=a);';") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip prepared plan-cache: find table test.qn failed: [schema:1146]Table 'test.qn' doesn't exist")) - tk.MustExec("set @a=1, @b=2, @c=3, @d=4, @e=5, @f=0;") - - tk.MustQuery("execute stmt using @f, @a, @f").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @a, @b, @a").Sort().Check(testkit.Rows("1", "2")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt using @a, @b, @a").Sort().Check(testkit.Rows("1", "2")) - //tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip prepared plan-cache: PhysicalApply plan is un-cacheable")) - - tk.MustExec("prepare stmt from 'with recursive c(p) as (select ?), cte(a, b) as (select 1, 1 union select a+?, 1 from cte, c where a < ?) 
select * from cte order by 1, 2;';") - tk.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 skip prepared plan-cache: find table test.cte failed: [schema:1146]Table 'test.cte' doesn't exist")) - tk.MustQuery("execute stmt using @a, @a, @e;").Check(testkit.Rows("1 1", "2 1", "3 1", "4 1", "5 1")) - tk.MustQuery("execute stmt using @b, @b, @c;").Check(testkit.Rows("1 1", "3 1")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - -func TestValidity4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int);") - - tk.MustExec("prepare stmt from 'select * from t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("drop database if exists plan_cache;") - tk.MustExec("create database plan_cache;") - tk.MustExec("use plan_cache;") - tk.MustExec("create table t(a int);") - tk.MustExec("insert into t values(1);") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("prepare stmt from 'select * from t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt;").Check(testkit.Rows("1")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) - - tk.MustExec("use test") // still read plan_cache.t and can hit the cache - tk.MustQuery("execute stmt;").Check(testkit.Rows("1")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) -} - -func TestListPartition4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("set @@session.tidb_enable_list_partition=1;") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b int) PARTITION BY LIST (a) ( PARTITION p0 VALUES IN (1, 2, 3), PARTITION p1 VALUES IN (4, 5, 6));") - - tk.MustExec("set @@tidb_partition_prune_mode='static';") - tk.MustExec("prepare stmt from 'select * from t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - // The list partition plan can not be cached. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - -func TestMoreSessions4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk2 := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("use test;") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int);") - tk.MustExec("prepare stmt from 'select * from t;';") - - tk.MustQuery("execute stmt").Check(testkit.Rows()) - tk.MustQuery("execute stmt").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - tk2.MustExec(`set tidb_enable_prepared_plan_cache=1`) - - tk2.MustExec("use test;") - require.EqualError(t, tk2.ExecToErr("execute stmt;"), "[planner:8111]Prepared statement not found") - tk2.MustExec("prepare stmt from 'select * from t;';") - tk2.MustQuery("execute stmt").Check(testkit.Rows()) - tk2.MustQuery("execute stmt").Check(testkit.Rows()) - tk2.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) - - tk.MustQuery("execute stmt").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("1")) -} - -func TestIssue28792(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("CREATE TABLE t12(a INT, b INT)") - tk.MustExec("CREATE TABLE t97(a INT, b INT UNIQUE NOT NULL);") - r1 := tk.MustQuery("EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 on t12.b = t97.b;").Rows() - r2 := tk.MustQuery("EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 use index () on t12.b = t97.b;").Rows() - require.Equal(t, r2, r1) -} - func TestExplainForJSON(t *testing.T) { store := testkit.CreateMockStore(t) tk1 := testkit.NewTestKit(t, store) diff --git a/pkg/executor/foreign_key.go b/pkg/executor/foreign_key.go index 512399c9931a6..55f34f8b02b64 100644 --- a/pkg/executor/foreign_key.go +++ b/pkg/executor/foreign_key.go @@ -174,7 +174,7 @@ func (fkc *FKCheckExec) updateRowNeedToCheck(sc *stmtctx.StatementContext, oldRo if len(oldVals) == len(newVals) { isSameValue := true for i := range oldVals { - cmp, err := oldVals[i].Compare(sc, &newVals[i], collate.GetCollator(oldVals[i].Collation())) + cmp, err := oldVals[i].Compare(sc.TypeCtx(), &newVals[i], collate.GetCollator(oldVals[i].Collation())) if err != nil || cmp != 0 { isSameValue = false break diff --git a/pkg/executor/grant.go b/pkg/executor/grant.go index 452f5a20db07c..6763c8944b8ac 100644 --- a/pkg/executor/grant.go +++ b/pkg/executor/grant.go @@ -37,6 +37,7 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) @@ -497,12 +498,12 @@ func (e *GrantExec) grantDynamicPriv(privName string, user *ast.UserSpec, intern // grantGlobalLevel manipulates mysql.user table. func (*GrantExec) grantGlobalLevel(priv *ast.PrivElem, user *ast.UserSpec, internalSession sessionctx.Context) error { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET `, mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, `UPDATE %n.%n SET `, mysql.SystemDB, mysql.UserTable) err := composeGlobalPrivUpdate(sql, priv.Priv, "Y") if err != nil { return err } - sqlexec.MustFormatSQL(sql, ` WHERE User=%? 
AND Host=%?`, user.User.Username, user.User.Hostname) + sqlescape.MustFormatSQL(sql, ` WHERE User=%? AND Host=%?`, user.User.Username, user.User.Hostname) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) @@ -523,12 +524,12 @@ func (e *GrantExec) grantDBLevel(priv *ast.PrivElem, user *ast.UserSpec, interna } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.DBTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.DBTable) err := composeDBPrivUpdate(sql, priv.Priv, "Y") if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%?", user.User.Username, user.User.Hostname, dbName) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%?", user.User.Username, user.User.Hostname, dbName) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) @@ -544,12 +545,12 @@ func (e *GrantExec) grantTableLevel(priv *ast.PrivElem, user *ast.UserSpec, inte tblName := e.Level.TableName sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.TablePrivTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.TablePrivTable) err := composeTablePrivUpdateForGrant(internalSession, sql, priv.Priv, user.User.Username, user.User.Hostname, dbName, tblName) if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user.User.Username, user.User.Hostname, dbName, tblName) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user.User.Username, user.User.Hostname, dbName, tblName) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) @@ -570,12 +571,12 @@ func (e *GrantExec) grantColumnLevel(priv *ast.PrivElem, user *ast.UserSpec, int } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.ColumnPrivTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.ColumnPrivTable) err := composeColumnPrivUpdateForGrant(internalSession, sql, priv.Priv, user.User.Username, user.User.Hostname, dbName, tbl.Meta().Name.O, col.Name.O) if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user.User.Username, user.User.Hostname, dbName, tbl.Meta().Name.O, col.Name.O) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? 
AND Column_name=%?", user.User.Username, user.User.Hostname, dbName, tbl.Meta().Name.O, col.Name.O) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) @@ -592,15 +593,15 @@ func composeGlobalPrivUpdate(sql *strings.Builder, priv mysql.PrivilegeType, val if priv != mysql.GrantPriv && !mysql.AllGlobalPrivs.Has(priv) { return exeerrors.ErrWrongUsage.GenWithStackByArgs("GLOBAL GRANT", "NON-GLOBAL PRIVILEGES") } - sqlexec.MustFormatSQL(sql, "%n=%?", priv.ColumnString(), value) + sqlescape.MustFormatSQL(sql, "%n=%?", priv.ColumnString(), value) return nil } for i, v := range mysql.AllGlobalPrivs { if i > 0 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } - sqlexec.MustFormatSQL(sql, "%n=%?", v.ColumnString(), value) + sqlescape.MustFormatSQL(sql, "%n=%?", v.ColumnString(), value) } return nil } @@ -611,15 +612,15 @@ func composeDBPrivUpdate(sql *strings.Builder, priv mysql.PrivilegeType, value s if priv != mysql.GrantPriv && !mysql.AllDBPrivs.Has(priv) { return exeerrors.ErrWrongUsage.GenWithStackByArgs("DB GRANT", "NON-DB PRIVILEGES") } - sqlexec.MustFormatSQL(sql, "%n=%?", priv.ColumnString(), value) + sqlescape.MustFormatSQL(sql, "%n=%?", priv.ColumnString(), value) return nil } for i, p := range mysql.AllDBPrivs { if i > 0 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } - sqlexec.MustFormatSQL(sql, "%n=%?", p.ColumnString(), value) + sqlescape.MustFormatSQL(sql, "%n=%?", p.ColumnString(), value) } return nil } @@ -649,7 +650,7 @@ func composeTablePrivUpdateForGrant(ctx sessionctx.Context, sql *strings.Builder } } - sqlexec.MustFormatSQL(sql, `Table_priv=%?, Column_priv=%?, Grantor=%?`, setToString(newTablePriv), setToString(newColumnPriv), ctx.GetSessionVars().User.String()) + sqlescape.MustFormatSQL(sql, `Table_priv=%?, Column_priv=%?, Grantor=%?`, setToString(newTablePriv), setToString(newColumnPriv), ctx.GetSessionVars().User.String()) return nil } @@ -669,7 +670,7 @@ func composeColumnPrivUpdateForGrant(ctx sessionctx.Context, sql *strings.Builde } } - sqlexec.MustFormatSQL(sql, `Column_priv=%?`, setToString(newColumnPriv)) + sqlescape.MustFormatSQL(sql, `Column_priv=%?`, setToString(newColumnPriv)) return nil } diff --git a/pkg/executor/grant_test.go b/pkg/executor/grant_test.go index 3a10b7aa5f13b..49cbc785ae56f 100644 --- a/pkg/executor/grant_test.go +++ b/pkg/executor/grant_test.go @@ -19,9 +19,6 @@ import ( "strings" "testing" - "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/infoschema" - "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" @@ -106,29 +103,6 @@ func TestGrantDBScope(t *testing.T) { require.True(t, terror.ErrorEqual(err, exeerrors.ErrWrongUsage.GenWithStackByArgs("DB GRANT", "NON-DB PRIVILEGES"))) } -func TestWithGrantOption(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - // Create a new user. - createUserSQL := `CREATE USER 'testWithGrant'@'localhost' IDENTIFIED BY '123';` - tk.MustExec(createUserSQL) - // Make sure all the db privs for new user is empty. - sql := `SELECT * FROM mysql.db WHERE User="testWithGrant" and host="localhost"` - tk.MustQuery(sql).Check(testkit.Rows()) - - // Grant select priv to the user, with grant option. 
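
// The grant.go hunks above only swap the formatting helper's package
// (sqlexec -> sqlescape); the placeholder contract is unchanged: %n escapes
// an identifier (backquoted), %? escapes a value literal. A minimal sketch of
// what the builders around composeGlobalPrivUpdate produce:
//
//	sql := new(strings.Builder)
//	sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", "mysql", "user")
//	sqlescape.MustFormatSQL(sql, "%n=%?", "Select_priv", "Y")
//	sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", "u1", "%")
//	// sql.String() => UPDATE `mysql`.`user` SET `Select_priv`='Y' WHERE User='u1' AND Host='%'
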
- tk.MustExec("GRANT select ON test.* TO 'testWithGrant'@'localhost' WITH GRANT OPTION;") - tk.MustQuery("SELECT grant_priv FROM mysql.DB WHERE User=\"testWithGrant\" and host=\"localhost\" and db=\"test\"").Check(testkit.Rows("Y")) - - tk.MustExec("CREATE USER 'testWithGrant1'") - tk.MustQuery("SELECT grant_priv FROM mysql.user WHERE User=\"testWithGrant1\"").Check(testkit.Rows("N")) - tk.MustExec("GRANT ALL ON *.* TO 'testWithGrant1'") - tk.MustQuery("SELECT grant_priv FROM mysql.user WHERE User=\"testWithGrant1\"").Check(testkit.Rows("N")) - tk.MustExec("GRANT ALL ON *.* TO 'testWithGrant1' WITH GRANT OPTION") - tk.MustQuery("SELECT grant_priv FROM mysql.user WHERE User=\"testWithGrant1\"").Check(testkit.Rows("Y")) -} - func TestGrantTableScope(t *testing.T) { store := testkit.CreateMockStore(t) @@ -216,388 +190,3 @@ func TestGrantColumnScope(t *testing.T) { tk.MustGetErrMsg("GRANT SUPER(c2) ON test3 TO 'testCol1'@'localhost';", "[executor:1221]Incorrect usage of COLUMN GRANT and NON-COLUMN PRIVILEGES") } - -func TestIssue2456(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE USER 'dduser'@'%' IDENTIFIED by '123456';") - tk.MustExec("CREATE DATABASE `dddb_%`;") - tk.MustExec("CREATE table `dddb_%`.`te%` (id int);") - tk.MustExec("GRANT ALL PRIVILEGES ON `dddb_%`.* TO 'dduser'@'%';") - tk.MustExec("GRANT ALL PRIVILEGES ON `dddb_%`.`te%` to 'dduser'@'%';") -} - -func TestNoAutoCreateUser(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`DROP USER IF EXISTS 'test'@'%'`) - tk.MustExec(`SET sql_mode='NO_AUTO_CREATE_USER'`) - _, err := tk.Exec(`GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx'`) - require.Error(t, err) - require.True(t, terror.ErrorEqual(err, exeerrors.ErrCantCreateUserWithGrant)) -} - -func TestCreateUserWhenGrant(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`DROP USER IF EXISTS 'test'@'%'`) - // This only applies to sql_mode:NO_AUTO_CREATE_USER off - tk.MustExec(`SET SQL_MODE=''`) - tk.MustExec(`GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx'`) - // Make sure user is created automatically when grant to a non-exists one. - tk.MustQuery(`SELECT user FROM mysql.user WHERE user='test' and host='%'`).Check( - testkit.Rows("test"), - ) - tk.MustExec(`DROP USER IF EXISTS 'test'@'%'`) - // Grant without a password. - tk.MustExec(`GRANT ALL PRIVILEGES ON *.* to 'test'@'%'`) - // Make sure user is created automatically when grant to a non-exists one. 
- tk.MustQuery(`SELECT user, plugin FROM mysql.user WHERE user='test' and host='%'`).Check( - testkit.Rows("test mysql_native_password"), - ) - tk.MustExec(`DROP USER IF EXISTS 'test'@'%'`) -} - -func TestCreateUserWithTooLongName(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - err := tk.ExecToErr("CREATE USER '1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost'") - require.Truef(t, terror.ErrorEqual(err, exeerrors.ErrWrongStringLength), "ERROR 1470 (HY000): String '1234567890abcdefGHIKL1234567890abcdefGHIKL' is too long for user name (should be no longer than 32)") - err = tk.ExecToErr("CREATE USER 'some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890X'") - require.Truef(t, terror.ErrorEqual(err, exeerrors.ErrWrongStringLength), "ERROR 1470 (HY000): String 'host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij12345' is too long for host name (should be no longer than 255)") -} - -func TestGrantPrivilegeAtomic(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`drop role if exists r1, r2, r3, r4;`) - tk.MustExec(`create role r1, r2, r3;`) - tk.MustExec(`create table test.testatomic(x int);`) - - _, err := tk.Exec(`grant update, select, insert, delete on *.* to r1, r2, r4;`) - require.True(t, terror.ErrorEqual(err, exeerrors.ErrCantCreateUserWithGrant)) - tk.MustQuery(`select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows( - "N N N N", - "N N N N", - "N N N N", - )) - tk.MustExec(`grant update, select, insert, delete on *.* to r1, r2, r3;`) - _, err = tk.Exec(`revoke all on *.* from r1, r2, r4, r3;`) - require.Error(t, err) - tk.MustQuery(`select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows( - "Y Y Y Y", - "Y Y Y Y", - "Y Y Y Y", - )) - - err = tk.ExecToErr(`grant update, select, insert, delete on test.* to r1, r2, r4;`) - require.True(t, terror.ErrorEqual(err, exeerrors.ErrCantCreateUserWithGrant)) - tk.MustQuery(`select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows()) - tk.MustExec(`grant update, select, insert, delete on test.* to r1, r2, r3;`) - err = tk.ExecToErr(`revoke all on *.* from r1, r2, r4, r3;`) - require.Error(t, err) - tk.MustQuery(`select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows( - "Y Y Y Y", - "Y Y Y Y", - "Y Y Y Y", - )) - - err = tk.ExecToErr(`grant update, select, insert, delete on test.testatomic to r1, r2, r4;`) - require.True(t, terror.ErrorEqual(err, exeerrors.ErrCantCreateUserWithGrant)) - tk.MustQuery(`select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows()) - tk.MustExec(`grant update, select, insert, delete on test.testatomic to r1, r2, r3;`) - err = tk.ExecToErr(`revoke all on *.* from r1, r2, r4, r3;`) - require.Error(t, err) - tk.MustQuery(`select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%";`).Check(testkit.Rows( - 
"Select,Insert,Update,Delete", - "Select,Insert,Update,Delete", - "Select,Insert,Update,Delete", - )) - - tk.MustExec(`drop role if exists r1, r2, r3, r4;`) - tk.MustExec(`drop table test.testatomic;`) -} - -func TestIssue2654(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`DROP USER IF EXISTS 'test'@'%'`) - tk.MustExec(`CREATE USER 'test'@'%' IDENTIFIED BY 'test'`) - tk.MustExec("GRANT SELECT ON test.* to 'test'") - rows := tk.MustQuery(`SELECT user,host FROM mysql.user WHERE user='test' and host='%'`) - rows.Check(testkit.Rows(`test %`)) -} - -func TestGrantUnderANSIQuotes(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - // Fix a bug that the GrantExec fails in ANSI_QUOTES sql mode - // The bug is caused by the improper usage of double quotes like: - // INSERT INTO mysql.user ... VALUES ("..", "..", "..") - tk.MustExec(`SET SQL_MODE='ANSI_QUOTES'`) - tk.MustExec(`GRANT ALL PRIVILEGES ON video_ulimit.* TO web@'%' IDENTIFIED BY 'eDrkrhZ>l2sV'`) - tk.MustExec(`REVOKE ALL PRIVILEGES ON video_ulimit.* FROM web@'%';`) - tk.MustExec(`DROP USER IF EXISTS 'web'@'%'`) -} - -func TestMaintainRequire(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - // test create with require - tk.MustExec(`CREATE USER 'ssl_auser'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'`) - tk.MustExec(`CREATE USER 'ssl_buser'@'%' require subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'`) - tk.MustExec(`CREATE USER 'ssl_cuser'@'%' require cipher 'AES128-GCM-SHA256'`) - tk.MustExec(`CREATE USER 'ssl_duser'@'%'`) - tk.MustExec(`CREATE USER 'ssl_euser'@'%' require none`) - tk.MustExec(`CREATE USER 'ssl_fuser'@'%' require ssl`) - tk.MustExec(`CREATE USER 'ssl_guser'@'%' require x509`) - tk.MustQuery("select * from mysql.global_priv where `user` like 'ssl_%'").Check(testkit.Rows( - "% ssl_auser {\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\",\"x509_issuer\":\"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US\",\"x509_subject\":\"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH\"}", - "% ssl_buser {\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\",\"x509_subject\":\"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH\"}", - "% ssl_cuser {\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\"}", - "% ssl_duser {}", - "% ssl_euser {}", - "% ssl_fuser {\"ssl_type\":1}", - "% ssl_guser {\"ssl_type\":2}", - )) - - // test grant with require - tk.MustExec("CREATE USER 'u1'@'%'") - tk.MustExec("GRANT ALL ON *.* TO 'u1'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' and subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH'") // add new require. - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'").Check(testkit.Rows("{\"ssl_type\":3,\"x509_issuer\":\"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US\",\"x509_subject\":\"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH\"}")) - tk.MustExec("GRANT ALL ON *.* TO 'u1'@'%' require cipher 'AES128-GCM-SHA256'") // modify always overwrite. 
- tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'").Check(testkit.Rows("{\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\"}")) - tk.MustExec("GRANT select ON *.* TO 'u1'@'%'") // modify without require should not modify old require. - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'").Check(testkit.Rows("{\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\"}")) - tk.MustExec("GRANT ALL ON *.* TO 'u1'@'%' require none") // use require none to clean up require. - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'").Check(testkit.Rows("{}")) - - // test alter with require - tk.MustExec("CREATE USER 'u2'@'%'") - tk.MustExec("alter user 'u2'@'%' require ssl") - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'").Check(testkit.Rows("{\"ssl_type\":1}")) - tk.MustExec("alter user 'u2'@'%' require x509") - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'").Check(testkit.Rows("{\"ssl_type\":2}")) - tk.MustExec("alter user 'u2'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'") - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'").Check(testkit.Rows("{\"ssl_type\":3,\"ssl_cipher\":\"AES128-GCM-SHA256\",\"x509_issuer\":\"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US\",\"x509_subject\":\"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH\"}")) - tk.MustExec("alter user 'u2'@'%' require none") - tk.MustQuery("select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'").Check(testkit.Rows("{}")) - - // test show create user - tk.MustExec(`CREATE USER 'u3'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'`) - tk.MustQuery("show create user 'u3'").Check(testkit.Rows("CREATE USER 'u3'@'%' IDENTIFIED WITH 'mysql_native_password' AS '' REQUIRE CIPHER 'AES128-GCM-SHA256' ISSUER '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' SUBJECT '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK PASSWORD HISTORY DEFAULT PASSWORD REUSE INTERVAL DEFAULT")) - - // check issuer/subject/cipher value - err := tk.ExecToErr(`CREATE USER 'u4'@'%' require issuer 'CN=TiDB,OU=PingCAP'`) - require.Error(t, err) - err = tk.ExecToErr(`CREATE USER 'u5'@'%' require subject '/CN=TiDB\OU=PingCAP'`) - require.Error(t, err) - err = tk.ExecToErr(`CREATE USER 'u6'@'%' require subject '/CN=TiDB\NC=PingCAP'`) - require.Error(t, err) - err = tk.ExecToErr(`CREATE USER 'u7'@'%' require cipher 'AES128-GCM-SHA1'`) - require.Error(t, err) - err = tk.ExecToErr(`CREATE USER 'u8'@'%' require subject '/CN'`) - require.Error(t, err) - tk.MustGetErrMsg(`CREATE USER 'u9'@'%' require cipher 'TLS_AES_256_GCM_SHA384' cipher 'RC4-SHA'`, "Duplicate require CIPHER clause") - tk.MustGetErrMsg(`CREATE USER 'u9'@'%' require issuer 'CN=TiDB,OU=PingCAP' issuer 'CN=TiDB,OU=PingCAP2'`, "Duplicate require ISSUER clause") - tk.MustGetErrMsg(`CREATE USER 'u9'@'%' require subject '/CN=TiDB\OU=PingCAP' subject '/CN=TiDB\OU=PingCAP2'`, "Duplicate require SUBJECT clause") - err = tk.ExecToErr(`CREATE USER 'u9'@'%' require ssl ssl`) - require.Error(t, err) - err = 
tk.ExecToErr(`CREATE USER 'u9'@'%' require x509 x509`) - require.Error(t, err) -} - -func TestMaintainAuthString(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`CREATE USER 'maint_auth_str1'@'%' IDENTIFIED BY 'foo'`) - tk.MustQuery("SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'").Check(testkit.Rows("*F3A2A51A9B0F2BE2468926B4132313728C250DBF")) - tk.MustExec(`ALTER USER 'maint_auth_str1'@'%' REQUIRE SSL`) - tk.MustQuery("SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'").Check(testkit.Rows("*F3A2A51A9B0F2BE2468926B4132313728C250DBF")) -} - -func TestGrantOnNonExistTable(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user genius") - tk.MustExec("use test") - err := tk.ExecToErr("select * from nonexist") - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - err = tk.ExecToErr("grant Select,Insert on nonexist to 'genius'") - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - - tk.MustExec("create table if not exists xx (id int)") - // Case insensitive, differ from MySQL default behaviour. - // In TiDB, system variable lower_case_table_names = 2, which means compare table name using lower case. - tk.MustExec("grant Select,Insert on XX to 'genius'") - tk.MustExec("grant Select,Insert on xx to 'genius'") - tk.MustExec("grant Select,Update on test.xx to 'genius'") - - // issue #29268 - tk.MustExec("CREATE DATABASE d29268") - defer tk.MustExec("DROP DATABASE IF EXISTS d29268") - tk.MustExec("USE d29268") - tk.MustExec("CREATE USER u29268") - defer tk.MustExec("DROP USER u29268") - - // without create privilege - err = tk.ExecToErr("GRANT SELECT ON t29268 TO u29268") - require.Error(t, err) - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - err = tk.ExecToErr("GRANT DROP, INSERT ON t29268 TO u29268") - require.Error(t, err) - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - err = tk.ExecToErr("GRANT UPDATE, CREATE VIEW, SHOW VIEW ON t29268 TO u29268") - require.Error(t, err) - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - err = tk.ExecToErr("GRANT DELETE, REFERENCES, ALTER ON t29268 TO u29268") - require.Error(t, err) - require.True(t, terror.ErrorEqual(err, infoschema.ErrTableNotExists)) - - // with create privilege - tk.MustExec("GRANT CREATE ON t29268 TO u29268") - tk.MustExec("GRANT CREATE, SELECT ON t29268 TO u29268") - tk.MustExec("GRANT CREATE, DROP, INSERT ON t29268 TO u29268") - - // check privilege - tk.Session().Auth(&auth.UserIdentity{Username: "u29268", Hostname: "localhost"}, nil, nil, nil) - tk.MustExec("USE d29268") - tk.MustExec("CREATE TABLE t29268 (c1 int)") - tk.MustExec("INSERT INTO t29268 VALUES (1), (2)") - tk.MustQuery("SELECT c1 FROM t29268").Check(testkit.Rows("1", "2")) - tk.MustExec("DROP TABLE t29268") - - // check grant all - tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil) - tk.MustExec("GRANT ALL ON t29268 TO u29268") -} - -func TestIssue22721(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table if not exists xx (id int)") - tk.MustExec("CREATE USER 'sync_ci_data'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';") - tk.MustExec("GRANT USAGE ON *.* TO 'sync_ci_data'@'%';") - tk.MustExec("GRANT USAGE ON 
sync_ci_data.* TO 'sync_ci_data'@'%';") - tk.MustExec("GRANT USAGE ON test.* TO 'sync_ci_data'@'%';") - tk.MustExec("GRANT USAGE ON test.xx TO 'sync_ci_data'@'%';") -} - -func TestPerformanceSchemaPrivGrant(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create user issue27867;") - defer func() { - tk.MustExec("drop user issue27867;") - }() - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "localhost"}, nil, nil, nil)) - tk.MustGetErrCode("grant all on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - // Check case insensitivity - tk.MustGetErrCode("grant all on PERFormanCE_scHemA.* to issue27867;", errno.ErrDBaccessDenied) - // Check other database privileges - tk.MustExec("grant select on performance_schema.* to issue27867;") - tk.MustGetErrCode("grant insert on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant update on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant delete on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant drop on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant lock tables on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant create on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant references on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant alter on PERFormAnCE_scHemA.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant execute on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant index on PERFormanCE_scHemA.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant create view on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) - tk.MustGetErrCode("grant show view on performance_schema.* to issue27867;", errno.ErrDBaccessDenied) -} - -func TestGrantDynamicPrivs(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user dyn") - - err := tk.ExecToErr("GRANT BACKUP_ADMIN ON test.* TO dyn") - require.True(t, terror.ErrorEqual(err, exeerrors.ErrIllegalPrivilegeLevel)) - err = tk.ExecToErr("GRANT BOGUS_GRANT ON *.* TO dyn") - require.True(t, terror.ErrorEqual(err, exeerrors.ErrDynamicPrivilegeNotRegistered)) - - tk.MustExec("GRANT BACKUP_Admin ON *.* TO dyn") // grant one priv - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N")) - - tk.MustExec("GRANT SYSTEM_VARIABLES_ADMIN, BACKUP_ADMIN ON *.* TO dyn") // grant multiple - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check( - testkit.Rows("dyn % BACKUP_ADMIN N", "dyn % SYSTEM_VARIABLES_ADMIN N"), - ) - - tk.MustExec("GRANT ROLE_ADMIN, BACKUP_ADMIN ON *.* TO dyn WITH GRANT OPTION") // grant multiple with GRANT option. 
- tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check( - testkit.Rows("dyn % BACKUP_ADMIN Y", "dyn % ROLE_ADMIN Y", "dyn % SYSTEM_VARIABLES_ADMIN N"), - ) - - tk.MustExec("GRANT SYSTEM_VARIABLES_ADMIN, Select, ROLE_ADMIN ON *.* TO dyn") // grant mixed dynamic/non dynamic - tk.MustQuery("SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn'").Check(testkit.Rows("N")) - tk.MustQuery("SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='SYSTEM_VARIABLES_ADMIN'").Check(testkit.Rows("N")) - - tk.MustExec("GRANT CONNECTION_ADMIN, Insert ON *.* TO dyn WITH GRANT OPTION") // grant mixed dynamic/non dynamic with GRANT option. - tk.MustQuery("SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn'").Check(testkit.Rows("Y")) - tk.MustQuery("SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='CONNECTION_ADMIN'").Check(testkit.Rows("Y")) -} - -func TestNonExistTableIllegalGrant(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user u29302") - defer tk.MustExec("drop user u29302") - // Table level, not existing table, illegal privilege - tk.MustGetErrCode("grant create temporary tables on NotExistsD29302.NotExistsT29302 to u29302", mysql.ErrIllegalGrantForTable) - tk.MustGetErrCode("grant lock tables on test.NotExistsT29302 to u29302", mysql.ErrIllegalGrantForTable) - // Column level, not existing table, illegal privilege - tk.MustGetErrCode("grant create temporary tables (NotExistsCol) on NotExistsD29302.NotExistsT29302 to u29302;", mysql.ErrWrongUsage) -} - -func TestIssue34610(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("DROP DATABASE IF EXISTS d1;") - tk.MustExec("CREATE DATABASE d1;") - tk.MustExec("USE d1;") - tk.MustExec("CREATE USER user_1@localhost;") - defer func() { - tk.MustExec("DROP DATABASE d1;") - tk.MustExec("DROP USER user_1@localhost;") - }() - - tk.MustExec("CREATE TABLE T1(f1 INT);") - tk.MustGetErrCode("CREATE TABLE t1(f1 INT);", mysql.ErrTableExists) - tk.MustExec("GRANT SELECT ON T1 to user_1@localhost;") - tk.MustExec("GRANT SELECT ON t1 to user_1@localhost;") -} - -func TestIssue38293(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.Session().GetSessionVars().User = &auth.UserIdentity{Username: "root", Hostname: "localhost"} - tk.MustExec("DROP USER IF EXISTS test") - tk.MustExec("CREATE USER test") - defer func() { - tk.MustExec("DROP USER test") - }() - tk.MustExec("GRANT SELECT ON `mysql`.`db` TO test") - tk.MustQuery("SELECT `Grantor` FROM `mysql`.`tables_priv` WHERE User = 'test'").Check(testkit.Rows("root@localhost")) -} diff --git a/pkg/executor/hash_table.go b/pkg/executor/hash_table.go index ffed29de0da4c..dbc9744c93e91 100644 --- a/pkg/executor/hash_table.go +++ b/pkg/executor/hash_table.go @@ -152,6 +152,47 @@ func (c *hashRowContainer) GetMatchedRows(probeKey uint64, probeRow chunk.Row, h return matchedRows, err } +// GetOneMatchedRow get one matched rows from probeRow. 
+func (c *hashRowContainer) GetOneMatchedRow(probeKey uint64, probeRow chunk.Row, hCtx *hashContext) (*chunk.Row, error) { + var err error + innerEntry := c.hashTable.Get(probeKey) + if innerEntry == nil { + return nil, err + } + var matchedRow chunk.Row + + if c.chkBuf != nil { + c.chkBuf.Reset() + } + capacity := 0 + + for i := 0; innerEntry != nil; i, innerEntry = i+1, innerEntry.next { + ptr := innerEntry.ptr + matchedRow, c.chkBuf, err = c.rowContainer.GetRowAndAppendToChunkIfInDisk(ptr, c.chkBuf) + if err != nil { + return nil, err + } + var ok bool + ok, err = c.matchJoinKey(matchedRow, probeRow, hCtx) + if err != nil { + return nil, err + } + if ok { + return &matchedRow, nil + } + atomic.AddInt64(&c.stat.probeCollision, 1) + if i == 0 { + capacity = c.chkBuf.Capacity() + if capacity < 128 { + capacity = 128 + } + } else if (i+1)%capacity == 0 { + c.chkBuf.Reset() + } + } + return nil, err +} + func (c *hashRowContainer) GetAllMatchedRows(probeHCtx *hashContext, probeSideRow chunk.Row, probeKeyNullBits *bitmap.ConcurrentBitmap, matched []chunk.Row, needCheckBuildColPos, needCheckProbeColPos []int, needCheckBuildTypes, needCheckProbeTypes []*types.FieldType) ([]chunk.Row, error) { // for NAAJ probe row with null, we should match them with all build rows. @@ -232,7 +273,11 @@ const rowPtrSize = int64(unsafe.Sizeof(chunk.RowPtr{})) // h and buf. func (c *hashRowContainer) GetMatchedRowsAndPtrs(probeKey uint64, probeRow chunk.Row, hCtx *hashContext, matched []chunk.Row, matchedPtrs []chunk.RowPtr, needPtr bool) ([]chunk.Row, []chunk.RowPtr, error) { var err error - innerPtrs := c.hashTable.Get(probeKey) + entry := c.hashTable.Get(probeKey) + var innerPtrs []chunk.RowPtr + for ; entry != nil; entry = entry.next { + innerPtrs = append(innerPtrs, entry.ptr) + } if len(innerPtrs) == 0 { return nil, nil, err } @@ -244,10 +289,9 @@ func (c *hashRowContainer) GetMatchedRowsAndPtrs(probeKey uint64, probeRow chunk var ( matchedDataSize = int64(cap(matched))*rowSize + int64(cap(matchedPtrs))*rowPtrSize needTrackMemUsage = cap(innerPtrs) > signalCheckpointForJoinMask - lastChunkBufPointer *chunk.Chunk + lastChunkBufPointer = c.chkBuf memDelta int64 ) - c.chkBuf = nil c.memTracker.Consume(-c.chkBufSizeForOneProbe) if needTrackMemUsage { c.memTracker.Consume(int64(cap(innerPtrs)) * rowPtrSize) @@ -265,7 +309,7 @@ func (c *hashRowContainer) GetMatchedRowsAndPtrs(probeKey uint64, probeRow chunk if err != nil { return nil, nil, err } - if needTrackMemUsage && c.chkBuf != lastChunkBufPointer && lastChunkBufPointer != nil { + if c.chkBuf != lastChunkBufPointer && lastChunkBufPointer != nil { lastChunkSize := lastChunkBufPointer.MemoryUsage() c.chkBufSizeForOneProbe += lastChunkSize memDelta += lastChunkSize @@ -565,7 +609,12 @@ func (es *entryStore) GetStore() (e *entry, memDelta int64) { type baseHashTable interface { Put(hashKey uint64, rowPtr chunk.RowPtr) - Get(hashKey uint64) (rowPtrs []chunk.RowPtr) + // e := Get(hashKey) + // for ; e != nil; e = e.next { + // rowPtr := e.ptr + // ... + // } + Get(hashKey uint64) *entry Len() uint64 // GetAndCleanMemoryDelta gets and cleans the memDelta of the baseHashTable. Memory delta will be cleared after each fetch. // It indicates the memory delta of the baseHashTable since the last calling GetAndCleanMemoryDelta(). @@ -611,13 +660,9 @@ func (ht *unsafeHashTable) Put(hashKey uint64, rowPtr chunk.RowPtr) { } // Get gets the values of the "key" and appends them to "values". 
-func (ht *unsafeHashTable) Get(hashKey uint64) (rowPtrs []chunk.RowPtr) { +func (ht *unsafeHashTable) Get(hashKey uint64) *entry { entryAddr := ht.hashMap[hashKey] - for entryAddr != nil { - rowPtrs = append(rowPtrs, entryAddr.ptr) - entryAddr = entryAddr.next - } - return + return entryAddr } // Len returns the number of rowPtrs in the unsafeHashTable, the number of keys may be less than Len @@ -674,13 +719,9 @@ func (ht *concurrentMapHashTable) Put(hashKey uint64, rowPtr chunk.RowPtr) { } // Get gets the values of the "key" and appends them to "values". -func (ht *concurrentMapHashTable) Get(hashKey uint64) (rowPtrs []chunk.RowPtr) { +func (ht *concurrentMapHashTable) Get(hashKey uint64) *entry { entryAddr, _ := ht.hashMap.Get(hashKey) - for entryAddr != nil { - rowPtrs = append(rowPtrs, entryAddr.ptr) - entryAddr = entryAddr.next - } - return + return entryAddr } // Iter gets the every value of the hash table. diff --git a/pkg/executor/import_into_test.go b/pkg/executor/import_into_test.go index f048fe7fc7391..284da6c67974c 100644 --- a/pkg/executor/import_into_test.go +++ b/pkg/executor/import_into_test.go @@ -15,28 +15,12 @@ package executor_test import ( - "fmt" "testing" - "github.com/pingcap/tidb/pkg/executor/importer" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/sem" - "github.com/stretchr/testify/require" ) -func TestImportIntoExplicitTransaction(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int);") - tk.MustExec(`BEGIN`) - err := tk.ExecToErr("IMPORT INTO t FROM '/file.csv'") - require.Error(t, err) - require.Regexp(t, "cannot run IMPORT INTO in explicit transaction", err.Error()) - tk.MustExec("commit") -} - func TestSecurityEnhancedMode(t *testing.T) { store := testkit.CreateMockStore(t) @@ -49,136 +33,3 @@ func TestSecurityEnhancedMode(t *testing.T) { // regardless of what privileges they have available. 
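
// The hash_table.go hunks above change baseHashTable.Get so that it returns
// the head of the intrusive *entry list instead of materializing a
// []chunk.RowPtr slice; probes that can stop at the first match (the new
// GetOneMatchedRow) no longer allocate per lookup. Caller-side iteration, as
// sketched in the new interface comment:
//
//	for e := ht.Get(probeKey); e != nil; e = e.next {
//		ptr := e.ptr // chunk.RowPtr of one candidate build row
//		// fetch the row behind ptr and verify the join key really matches
//	}
//
// Callers that still need a slice (GetMatchedRowsAndPtrs) rebuild it locally,
// keeping the old semantics.
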
tk.MustGetErrMsg("IMPORT INTO test.t FROM '/file.csv'", "[planner:8132]Feature 'IMPORT INTO from server disk' is not supported when security enhanced mode is enabled") } - -func TestImportIntoOptionsNegativeCase(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int);") - - cases := []struct { - OptionStr string - Err error - }{ - {OptionStr: "xx=1", Err: exeerrors.ErrUnknownOption}, - {OptionStr: "detached=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "detached, detached", Err: exeerrors.ErrDuplicateOption}, - - {OptionStr: "character_set=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set=''", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "character_set='aa'", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_terminated_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_terminated_by=''", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_enclosed_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_enclosed_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_escaped_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_escaped_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "fields_defined_null_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_defined_null_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "fields_defined_null_by=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "lines_terminated_by=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=true", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "lines_terminated_by=''", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "skip_rows=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=''", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=-1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "skip_rows=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "split_file='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "split_file, skip_rows=2", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "disk_quota='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota='220MiBxxx'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "disk_quota=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "thread='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=0", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: 
"thread=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=-100", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "thread=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "max_write_speed='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed='11aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=-1", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "max_write_speed=false", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "checksum_table=''", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=false", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "checksum_table=null", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "record_errors='aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors='111aa'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=-123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=null", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "record_errors=true", Err: exeerrors.ErrInvalidOptionVal}, - - {OptionStr: "cloud_storage_uri=123", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "cloud_storage_uri=':'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "cloud_storage_uri='sdsd'", Err: exeerrors.ErrInvalidOptionVal}, - {OptionStr: "cloud_storage_uri='http://sdsd'", Err: exeerrors.ErrInvalidOptionVal}, - } - - sqlTemplate := "import into t from '/file.csv' with %s" - for _, c := range cases { - sql := fmt.Sprintf(sqlTemplate, c.OptionStr) - err := tk.ExecToErr(sql) - require.ErrorIs(t, err, c.Err, sql) - } - - nonCSVCases := []struct { - OptionStr string - Err error - }{ - {OptionStr: "character_set='utf8'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_terminated_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_enclosed_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_escaped_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "fields_defined_null_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "lines_terminated_by='a'", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "skip_rows=1", Err: exeerrors.ErrLoadDataUnsupportedOption}, - {OptionStr: "split_file", Err: exeerrors.ErrLoadDataUnsupportedOption}, - } - - sqlTemplate = "import into t from '/file.csv' format '%s' with %s" - for _, c := range nonCSVCases { - for _, format := range []string{importer.DataFormatParquet, importer.DataFormatSQL} { - sql := fmt.Sprintf(sqlTemplate, format, c.OptionStr) - err := tk.ExecToErr(sql) - require.ErrorIs(t, err, c.Err, sql) - } - } - - parameterCheck := []struct { - sql string - Err error - }{ - {sql: "import into t from ''", Err: exeerrors.ErrLoadDataEmptyPath}, - {sql: "import into t from '/a.csv' format 'xx'", Err: exeerrors.ErrLoadDataUnsupportedFormat}, - } - - for _, c := range parameterCheck { - err := tk.ExecToErr(c.sql) - require.ErrorIs(t, err, c.Err, c.sql) - } -} diff --git a/pkg/executor/importer/BUILD.bazel b/pkg/executor/importer/BUILD.bazel index 3b691ad85cbcb..07f6fb8622569 100644 --- a/pkg/executor/importer/BUILD.bazel +++ b/pkg/executor/importer/BUILD.bazel @@ -88,7 +88,7 @@ go_test( embed = [":importer"], flaky = True, race = "on", - shard_count = 17, + shard_count = 19, deps = [ "//br/pkg/errors", "//br/pkg/lightning/backend/encode", 
diff --git a/pkg/executor/importer/OWNERS b/pkg/executor/importer/OWNERS
new file mode 100644
index 0000000000000..6f03c32c76b51
--- /dev/null
+++ b/pkg/executor/importer/OWNERS
@@ -0,0 +1,5 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+options:
+  no_parent_owners: true
+approvers:
+  - sig-approvers-executor-import
diff --git a/pkg/executor/importer/import.go b/pkg/executor/importer/import.go
index fa1a4e19ef890..4d4cc844f85f3 100644
--- a/pkg/executor/importer/import.go
+++ b/pkg/executor/importer/import.go
@@ -136,6 +136,13 @@ var (
 	// LoadDataReadBlockSize is exposed for test.
 	LoadDataReadBlockSize = int64(config.ReadBlockSize)
+
+	supportedSuffixForServerDisk = []string{
+		".csv", ".sql", ".parquet",
+		".gz", ".gzip",
+		".zstd", ".zst",
+		".snappy",
+	}
 )
 
 // GetKVStore returns a kv.Storage.
@@ -656,7 +663,6 @@ func (p *Plan) initOptions(seCtx sessionctx.Context, options []*plannercore.Load
 			return exeerrors.ErrInvalidOptionVal.FastGenByArgs(opt.Name)
 		}
 		p.MaxRecordedErrors = vInt
-		// todo: set a max value for this param?
 	}
 	if _, ok := specifiedOptions[detachedOption]; ok {
 		p.Detached = true
@@ -969,7 +975,7 @@ func (e *LoadDataController) InitDataFiles(ctx context.Context) error {
 	}
 	// we add this check for security, we don't want user import any sensitive system files,
 	// most of which is readable text file and don't have a suffix, such as /etc/passwd
-	if !slices.Contains([]string{".csv", ".sql", ".parquet"}, strings.ToLower(filepath.Ext(e.Path))) {
+	if !slices.Contains(supportedSuffixForServerDisk, strings.ToLower(filepath.Ext(e.Path))) {
 		return exeerrors.ErrLoadDataInvalidURI.GenWithStackByArgs(plannercore.ImportIntoDataSource,
 			"the file suffix is not supported when import from server disk")
 	}
@@ -1242,13 +1248,26 @@ func (e *LoadDataController) CreateColAssignExprs(sctx sessionctx.Context) ([]ex
 	return res, allWarnings, nil
 }
 
+func (e *LoadDataController) getBackendWorkerConcurrency() int {
+	// When using global sort, the write&ingest step buffers KV data in memory;
+	// assuming a cpu:mem ratio of 1:2 (true in most cases) and 1 GiB per unit of
+	// concurrency, we can use 2 * ThreadCnt as the concurrency. The write&ingest
+	// step is mostly IO-intensive, so CPU usage stays below ThreadCnt in our tests.
+	// The concurrency actually used is adjusted later in the external engine.
+	// When using local sort, use the same default value as lightning.
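
// Concretely (assuming lightning's config.DefaultRangeConcurrency keeps its
// default of 16), the helper below resolves to:
//
//	global sort: 2 * ThreadCnt                // e.g. ThreadCnt=3 -> 6
//	local sort:  2 * DefaultRangeConcurrency  // 2 * 16 -> 32, the old hard-coded value
//
// which is exactly what TestGetBackendWorkerConcurrency asserts further down.
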
+ if e.IsGlobalSort() { + return int(e.ThreadCnt) * 2 + } + return config.DefaultRangeConcurrency * 2 +} + func (e *LoadDataController) getLocalBackendCfg(pdAddr, dataDir string) local.BackendConfig { backendConfig := local.BackendConfig{ PDAddr: pdAddr, LocalStoreDir: dataDir, MaxConnPerStore: config.DefaultRangeConcurrency, ConnCompressType: config.CompressionNone, - WorkerConcurrency: config.DefaultRangeConcurrency * 2, + WorkerConcurrency: e.getBackendWorkerConcurrency(), KVWriteBatchSize: config.KVWriteBatchSize, RegionSplitBatchSize: config.DefaultRegionSplitBatchSize, RegionSplitConcurrency: runtime.GOMAXPROCS(0), diff --git a/pkg/executor/importer/import_test.go b/pkg/executor/importer/import_test.go index baf3c0e25178d..4cb202c7356dd 100644 --- a/pkg/executor/importer/import_test.go +++ b/pkg/executor/importer/import_test.go @@ -18,6 +18,9 @@ import ( "context" "fmt" "net/url" + "os" + "path" + "path/filepath" "runtime" "testing" "time" @@ -34,9 +37,11 @@ import ( plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" + "go.uber.org/zap" ) func TestInitDefaultOptions(t *testing.T) { @@ -288,3 +293,101 @@ func TestGetLocalBackendCfg(t *testing.T) { require.Greater(t, cfg.RaftKV2SwitchModeDuration, time.Duration(0)) require.Equal(t, config.DefaultSwitchTiKVModeInterval, cfg.RaftKV2SwitchModeDuration) } + +func TestGetBackendWorkerConcurrency(t *testing.T) { + c := &LoadDataController{ + Plan: &Plan{ + ThreadCnt: 3, + }, + } + require.Equal(t, 32, c.getBackendWorkerConcurrency()) + c.Plan.CloudStorageURI = "xxx" + require.Equal(t, 6, c.getBackendWorkerConcurrency()) + c.Plan.ThreadCnt = 123 + require.Equal(t, 246, c.getBackendWorkerConcurrency()) +} + +func TestSupportedSuffixForServerDisk(t *testing.T) { + tempDir := t.TempDir() + ctx := context.Background() + + fileName := filepath.Join(tempDir, "test.csv") + require.NoError(t, os.WriteFile(fileName, []byte{}, 0o644)) + fileName2 := filepath.Join(tempDir, "test.csv.gz") + require.NoError(t, os.WriteFile(fileName2, []byte{}, 0o644)) + c := LoadDataController{ + Plan: &Plan{ + Format: DataFormatCSV, + InImportInto: true, + }, + logger: zap.NewExample(), + } + // no suffix + c.Path = filepath.Join(tempDir, "test") + require.ErrorIs(t, c.InitDataFiles(ctx), exeerrors.ErrLoadDataInvalidURI) + // unknown suffix + c.Path = filepath.Join(tempDir, "test.abc") + require.ErrorIs(t, c.InitDataFiles(ctx), exeerrors.ErrLoadDataInvalidURI) + c.Path = fileName + require.NoError(t, c.InitDataFiles(ctx)) + c.Path = fileName2 + require.NoError(t, c.InitDataFiles(ctx)) + + var allData []string + for i := 0; i < 3; i++ { + fileName := fmt.Sprintf("server-%d.csv", i) + var content []byte + rowCnt := 2 + for j := 0; j < rowCnt; j++ { + content = append(content, []byte(fmt.Sprintf("%d,test-%d\n", i*rowCnt+j, i*rowCnt+j))...) 
+ allData = append(allData, fmt.Sprintf("%d test-%d", i*rowCnt+j, i*rowCnt+j)) + } + require.NoError(t, os.WriteFile(path.Join(tempDir, fileName), content, 0o644)) + } + // directory without permission + require.NoError(t, os.MkdirAll(path.Join(tempDir, "no-perm"), 0o700)) + require.NoError(t, os.WriteFile(path.Join(tempDir, "no-perm", "no-perm.csv"), []byte("1,1"), 0o644)) + require.NoError(t, os.Chmod(path.Join(tempDir, "no-perm"), 0o000)) + t.Cleanup(func() { + // make sure TempDir RemoveAll cleanup works + _ = os.Chmod(path.Join(tempDir, "no-perm"), 0o700) + }) + // file without permission + require.NoError(t, os.WriteFile(path.Join(tempDir, "no-perm.csv"), []byte("1,1"), 0o644)) + require.NoError(t, os.Chmod(path.Join(tempDir, "no-perm.csv"), 0o000)) + + // relative path + c.Path = "~/file.csv" + err2 := c.InitDataFiles(ctx) + require.ErrorIs(t, err2, exeerrors.ErrLoadDataInvalidURI) + require.ErrorContains(t, err2, "URI of data source is invalid") + // non-exist parent directory + c.Path = "/path/to/non/exists/file.csv" + err := c.InitDataFiles(ctx) + require.ErrorIs(t, err, exeerrors.ErrLoadDataInvalidURI) + require.ErrorContains(t, err, "no such file or directory") + // without permission to parent dir + c.Path = path.Join(tempDir, "no-perm", "no-perm.csv") + err = c.InitDataFiles(ctx) + require.ErrorIs(t, err, exeerrors.ErrLoadDataCantRead) + require.ErrorContains(t, err, "permission denied") + // file not exists + c.Path = path.Join(tempDir, "not-exists.csv") + err = c.InitDataFiles(ctx) + require.ErrorIs(t, err, exeerrors.ErrLoadDataCantRead) + require.ErrorContains(t, err, "no such file or directory") + // file without permission + c.Path = path.Join(tempDir, "no-perm.csv") + err = c.InitDataFiles(ctx) + require.ErrorIs(t, err, exeerrors.ErrLoadDataCantRead) + require.ErrorContains(t, err, "permission denied") + // we don't have read access to 'no-perm' directory, so walk-dir fails + c.Path = path.Join(tempDir, "server-*.csv") + err = c.InitDataFiles(ctx) + require.ErrorIs(t, err, exeerrors.ErrLoadDataCantRead) + require.ErrorContains(t, err, "permission denied") + // grant read access to 'no-perm' directory, should ok now. 
+ require.NoError(t, os.Chmod(path.Join(tempDir, "no-perm"), 0o400)) + c.Path = path.Join(tempDir, "server-*.csv") + require.NoError(t, c.InitDataFiles(ctx)) +} diff --git a/pkg/executor/importer/precheck_test.go b/pkg/executor/importer/precheck_test.go index 802d3801893dd..9645365fbf055 100644 --- a/pkg/executor/importer/precheck_test.go +++ b/pkg/executor/importer/precheck_test.go @@ -50,9 +50,9 @@ func createMockETCD(t *testing.T) (string, *embed.Etcd) { randPort := int(rand.Int31n(40000)) + 20000 clientAddr := fmt.Sprintf(addrFmt, randPort) lcurl, _ := url.Parse(clientAddr) - cfg.LCUrls, cfg.ACUrls = []url.URL{*lcurl}, []url.URL{*lcurl} + cfg.ListenClientUrls, cfg.AdvertiseClientUrls = []url.URL{*lcurl}, []url.URL{*lcurl} lpurl, _ := url.Parse(fmt.Sprintf(addrFmt, randPort+1)) - cfg.LPUrls, cfg.APUrls = []url.URL{*lpurl}, []url.URL{*lpurl} + cfg.ListenPeerUrls, cfg.AdvertisePeerUrls = []url.URL{*lpurl}, []url.URL{*lpurl} cfg.InitialCluster = "default=" + lpurl.String() cfg.Logger = "zap" embedEtcd, err := embed.StartEtcd(cfg) diff --git a/pkg/executor/index_advise_test.go b/pkg/executor/index_advise_test.go index 20d5b94bf0fc0..a4cbc6a65dc38 100644 --- a/pkg/executor/index_advise_test.go +++ b/pkg/executor/index_advise_test.go @@ -65,135 +65,3 @@ func TestIndexAdvise(t *testing.T) { require.Equal(t, uint64(4), ia.MaxIndexNum.PerTable) require.Equal(t, uint64(5), ia.MaxIndexNum.PerDB) } - -func TestIndexJoinProjPattern(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_opt_advanced_join_hint=0;") - tk.MustExec(`create table t1( -pnbrn_cnaps varchar(5) not null, -new_accno varchar(18) not null, -primary key(pnbrn_cnaps,new_accno) nonclustered -);`) - tk.MustExec(`create table t2( -pnbrn_cnaps varchar(5) not null, -txn_accno varchar(18) not null, -txn_dt date not null, -yn_frz varchar(1) default null -);`) - tk.MustExec(`insert into t1(pnbrn_cnaps,new_accno) values ("40001","123")`) - tk.MustExec(`insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0");`) - - sql := `update -/*+ inl_join(a) */ -t2 b, -( -select t1.pnbrn_cnaps, -t1.new_accno -from t1 -where t1.pnbrn_cnaps = '40001' -) a -set b.yn_frz = '1' -where b.txn_dt = str_to_date('20221201', '%Y%m%d') -and b.pnbrn_cnaps = a.pnbrn_cnaps -and b.txn_accno = a.new_accno;` - rows := [][]interface{}{ - {"Update_8"}, - {"└─IndexJoin_14"}, - {" ├─TableReader_23(Build)"}, - {" │ └─Selection_22"}, - {" │ └─TableFullScan_21"}, - {" └─IndexReader_12(Probe)"}, - {" └─IndexRangeScan_11"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - rows = [][]interface{}{ - {"Update_8"}, - {"└─HashJoin_12"}, - {" ├─TableReader_15(Build)"}, - {" │ └─Selection_14"}, - {" │ └─TableFullScan_13"}, - {" └─IndexReader_18(Probe)"}, - {" └─IndexRangeScan_17"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustExec(sql) - tk.MustQuery("select yn_frz from t2").Check(testkit.Rows("1")) -} - -func TestIndexJoinSelPattern(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - tk.MustExec(` create table tbl_miss( -id bigint(20) unsigned not null -,txn_dt date 
default null -,perip_sys_uuid varchar(32) not null -,rvrs_idr varchar(1) not null -,primary key(id) clustered -,key idx1 (txn_dt, perip_sys_uuid, rvrs_idr) -); -`) - tk.MustExec(`insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1");`) - tk.MustExec(`create table tbl_src( -txn_dt date default null -,uuid varchar(32) not null -,rvrs_idr char(1) -,expd_inf varchar(5000) -,primary key(uuid,rvrs_idr) nonclustered -); -`) - tk.MustExec(`insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1");`) - sql := `select /*+ use_index(mis,) inl_join(src) */ - * - from tbl_miss mis - ,tbl_src src - where src.txn_dt >= str_to_date('20221201', '%Y%m%d') - and mis.id between 1 and 10000 - and mis.perip_sys_uuid = src.uuid - and mis.rvrs_idr = src.rvrs_idr - and mis.txn_dt = src.txn_dt - and ( - case when isnull(src.expd_inf) = 1 then '' - else - substr(concat_ws('',src.expd_inf,'~~'), - instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, - instr(substr(concat_ws('',src.expd_inf,'~~'), - instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) - end - ) != '01';` - rows := [][]interface{}{ - {"HashJoin_9"}, - {"├─TableReader_12(Build)"}, - {"│ └─Selection_11"}, - {"│ └─TableRangeScan_10"}, - {"└─Selection_13(Probe)"}, - {" └─TableReader_16"}, - {" └─Selection_15"}, - {" └─TableFullScan_14"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - rows = [][]interface{}{ - {"IndexJoin_13"}, - {"├─TableReader_25(Build)"}, - {"│ └─Selection_24"}, - {"│ └─TableRangeScan_23"}, - {"└─Selection_12(Probe)"}, - {" └─IndexLookUp_11"}, - {" ├─IndexRangeScan_8(Build)"}, - {" └─Selection_10(Probe)"}, - {" └─TableRowIDScan_9"}, - } - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'") - tk.MustQuery("explain "+sql).CheckAt([]int{0}, rows) - tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 ")) - tk.MustExec("set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'") - tk.MustQuery(sql).Check(testkit.Rows("1 2022-12-01 123 1 2022-12-01 123 1 ")) -} diff --git a/pkg/executor/index_lookup_hash_join.go b/pkg/executor/index_lookup_hash_join.go index 22eb3a16ae366..81314f20a714d 100644 --- a/pkg/executor/index_lookup_hash_join.go +++ b/pkg/executor/index_lookup_hash_join.go @@ -87,7 +87,6 @@ type indexHashJoinOuterWorker struct { type indexHashJoinInnerWorker struct { innerWorker - matchedOuterPtrs []chunk.RowPtr joiner joiner joinChkResourceCh chan *chunk.Chunk // resultCh is valid only when indexNestedLoopHashJoin do not need to keep @@ -189,6 +188,9 @@ func (e *IndexNestedLoopHashJoin) finishJoinWorkers(r interface{}) { if r != nil { e.IndexLookUpJoin.finished.Store(true) err := fmt.Errorf("%v", r) + if recoverdErr, ok := r.(error); ok { + err = recoverdErr + } if !e.keepOuterOrder { e.resultCh <- &indexHashJoinResult{err: err} } else { @@ -436,7 +438,6 @@ func (e *IndexNestedLoopHashJoin) newInnerWorker(taskCh chan *indexHashJoinTask, joiner: e.joiners[workerID], joinChkResourceCh: e.joinChkResourceCh[workerID], resultCh: e.resultCh, - matchedOuterPtrs: make([]chunk.RowPtr, 0, e.MaxChunkSize()), joinKeyBuf: make([]byte, 1), outerRowStatus: make([]outerRowStatusFlag, 0, e.MaxChunkSize()), rowIter: chunk.NewIterator4Slice([]chunk.Row{}).(*chunk.Iterator4Slice), @@ -712,15 +713,14 @@ func (iw *indexHashJoinInnerWorker) getMatchedOuterRows(innerRow chunk.Row, task if err != nil { return nil, nil, err } 
- iw.matchedOuterPtrs = task.lookupMap.Get(h.Sum64()) - if len(iw.matchedOuterPtrs) == 0 { + matchedOuterEntry := task.lookupMap.Get(h.Sum64()) + if matchedOuterEntry == nil { return nil, nil, nil } joinType := JoinerType(iw.joiner) isSemiJoin := joinType == plannercore.SemiJoin || joinType == plannercore.LeftOuterSemiJoin - matchedRows = make([]chunk.Row, 0, len(iw.matchedOuterPtrs)) - matchedRowPtr = make([]chunk.RowPtr, 0, len(iw.matchedOuterPtrs)) - for _, ptr := range iw.matchedOuterPtrs { + for ; matchedOuterEntry != nil; matchedOuterEntry = matchedOuterEntry.next { + ptr := matchedOuterEntry.ptr outerRow := task.outerResult.GetRow(ptr) ok, err := codec.EqualChunkRow(iw.ctx.GetSessionVars().StmtCtx, innerRow, iw.hashTypes, iw.hashCols, outerRow, iw.outerCtx.hashTypes, iw.outerCtx.hashCols) if err != nil { diff --git a/pkg/executor/index_lookup_join.go b/pkg/executor/index_lookup_join.go index ec6e6220bbea9..bb5aa2bd5c603 100644 --- a/pkg/executor/index_lookup_join.go +++ b/pkg/executor/index_lookup_join.go @@ -25,7 +25,6 @@ import ( "time" "unsafe" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" @@ -35,6 +34,7 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" @@ -371,7 +371,7 @@ func (ow *outerWorker) run(ctx context.Context, wg *sync.WaitGroup) { ow.lookup.finished.Store(true) logutil.Logger(ctx).Error("outerWorker panicked", zap.Any("recover", r), zap.Stack("stack")) task := &lookUpJoinTask{doneCh: make(chan error, 1)} - err := errors.Errorf("%v", r) + err := util.GetRecoverError(r) task.doneCh <- err ow.pushToChan(ctx, task, ow.resultCh) } @@ -489,7 +489,7 @@ func (iw *innerWorker) run(ctx context.Context, wg *sync.WaitGroup) { if r := recover(); r != nil { iw.lookup.finished.Store(true) logutil.Logger(ctx).Error("innerWorker panicked", zap.Any("recover", r), zap.Stack("stack")) - err := errors.Errorf("%v", r) + err := util.GetRecoverError(r) // "task != nil" is guaranteed when panic happened. task.doneCh <- err } @@ -627,7 +627,7 @@ func (iw *innerWorker) constructDatumLookupKey(task *lookUpJoinTask, chkIdx, row return nil, nil, nil } innerColType := iw.rowTypes[iw.hashCols[i]] - innerValue, err := outerValue.ConvertTo(sc, innerColType) + innerValue, err := outerValue.ConvertTo(sc.TypeCtx(), innerColType) if err != nil && !(terror.ErrorEqual(err, types.ErrTruncated) && (innerColType.GetType() == mysql.TypeSet || innerColType.GetType() == mysql.TypeEnum)) { // If the converted outerValue overflows or invalid to innerValue, we don't need to lookup it. 
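
// The recover paths above switch from errors.Errorf("%v", r) to
// util.GetRecoverError(r), so a panic value that already is an error keeps its
// concrete type instead of being flattened into a string. A sketch of the
// behavior relied on here, assuming GetRecoverError is essentially:
//
//	func GetRecoverError(r any) error {
//		if err, ok := r.(error); ok {
//			return err // preserve the original error value
//		}
//		return errors.Errorf("%v", r) // fall back to stringifying
//	}
//
// This mirrors the inline fix in finishJoinWorkers earlier in this patch.
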
if terror.ErrorEqual(err, types.ErrOverflow) || terror.ErrorEqual(err, types.ErrWarnDataOutOfRange) { @@ -635,7 +635,7 @@ func (iw *innerWorker) constructDatumLookupKey(task *lookUpJoinTask, chkIdx, row } return nil, nil, err } - cmp, err := outerValue.Compare(sc, &innerValue, iw.hashCollators[i]) + cmp, err := outerValue.Compare(sc.TypeCtx(), &innerValue, iw.hashCollators[i]) if err != nil { return nil, nil, err } @@ -675,7 +675,7 @@ func (iw *innerWorker) sortAndDedupLookUpContents(lookUpContents []*indexJoinLoo func compareRow(sc *stmtctx.StatementContext, left, right []types.Datum, ctors []collate.Collator) int { for idx := 0; idx < len(left); idx++ { - cmp, err := left[idx].Compare(sc, &right[idx], ctors[idx]) + cmp, err := left[idx].Compare(sc.TypeCtx(), &right[idx], ctors[idx]) // We only compare rows with the same type, no error to return. terror.Log(err) if cmp > 0 { diff --git a/pkg/executor/index_lookup_merge_join.go b/pkg/executor/index_lookup_merge_join.go index 2ff7729f239c2..8a43375a4bbdb 100644 --- a/pkg/executor/index_lookup_merge_join.go +++ b/pkg/executor/index_lookup_merge_join.go @@ -16,13 +16,11 @@ package executor import ( "context" - "fmt" "runtime/trace" "slices" "sync" "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" @@ -31,6 +29,7 @@ import ( plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/channel" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/collate" @@ -296,7 +295,7 @@ func (omw *outerMergeWorker) run(ctx context.Context, wg *sync.WaitGroup, cancel defer func() { if r := recover(); r != nil { task := &lookUpMergeJoinTask{ - doneErr: fmt.Errorf("%v", r), + doneErr: util.GetRecoverError(r), results: make(chan *indexMergeJoinResult, numResChkHold), } close(task.results) @@ -395,7 +394,7 @@ func (imw *innerMergeWorker) run(ctx context.Context, wg *sync.WaitGroup, cancel wg.Done() if r := recover(); r != nil { if task != nil { - task.doneErr = errors.Errorf("%v", r) + task.doneErr = util.GetRecoverError(r) close(task.results) } logutil.Logger(ctx).Error("innerMergeWorker panicked", zap.Any("recover", r), zap.Stack("stack")) @@ -672,7 +671,7 @@ func (imw *innerMergeWorker) constructDatumLookupKey(task *lookUpMergeJoinTask, return nil, nil } innerColType := imw.rowTypes[imw.keyCols[i]] - innerValue, err := outerValue.ConvertTo(sc, innerColType) + innerValue, err := outerValue.ConvertTo(sc.TypeCtx(), innerColType) if err != nil { // If the converted outerValue overflows, we don't need to lookup it. 
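The panic handlers in the index-join workers above now route the recovered value through util.GetRecoverError(r) rather than errors.Errorf("%v", r), so a payload that already is an error keeps its identity instead of being flattened to a string. A sketch of what such a helper presumably looks like; getRecoverError below is a local stand-in, not the library function:

package main

import (
	"errors"
	"fmt"
)

// getRecoverError is a stand-in for what util.GetRecoverError presumably
// does: return a recovered error unchanged so errors.Is and errors.As
// still work upstream, and fall back to formatting for non-error panics.
func getRecoverError(r any) error {
	if err, ok := r.(error); ok {
		return err
	}
	return fmt.Errorf("%v", r)
}

func main() {
	sentinel := errors.New("inner worker failed")
	defer func() {
		if r := recover(); r != nil {
			err := getRecoverError(r)
			// Prints true: the error identity survives the panic/recover
			// round trip, unlike with errors.Errorf("%v", r).
			fmt.Println(errors.Is(err, sentinel))
		}
	}()
	panic(sentinel)
}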
if terror.ErrorEqual(err, types.ErrOverflow) || terror.ErrorEqual(err, types.ErrWarnDataOutOfRange) { @@ -683,7 +682,7 @@ func (imw *innerMergeWorker) constructDatumLookupKey(task *lookUpMergeJoinTask, } return nil, err } - cmp, err := outerValue.Compare(sc, &innerValue, imw.keyCollators[i]) + cmp, err := outerValue.Compare(sc.TypeCtx(), &innerValue, imw.keyCollators[i]) if err != nil { return nil, err } diff --git a/pkg/executor/index_lookup_merge_join_test.go b/pkg/executor/index_lookup_merge_join_test.go index a7a8d5ce7e0c5..502064642c2f6 100644 --- a/pkg/executor/index_lookup_merge_join_test.go +++ b/pkg/executor/index_lookup_merge_join_test.go @@ -15,14 +15,10 @@ package executor_test import ( - "strings" "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/testkit/testdata" - "github.com/pingcap/tidb/pkg/util/plancodec" "github.com/stretchr/testify/require" ) @@ -45,23 +41,6 @@ func TestIndexLookupMergeJoinHang(t *testing.T) { require.Equal(t, "OOM test index merge join doesn't hang here.", err.Error()) } -func TestIssue28052(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t` (" + - "`col_tinyint_key_signed` tinyint(4) DEFAULT NULL," + - "`col_year_key_signed` year(4) DEFAULT NULL," + - "KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`)," + - "KEY `col_year_key_signed` (`col_year_key_signed`)" + - " ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin") - - tk.MustExec("insert into t values(-100,NULL);") - tk.MustQuery("select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. 
`col_tinyint_key_signed`").Check(testkit.Rows("1")) -} - func TestIssue18068(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/testIssue18068", `return(true)`)) defer func() { @@ -85,146 +64,3 @@ func TestIssue18068(t *testing.T) { tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1") tk.MustExec("select /*+ inl_merge_join(s)*/ 1 from t join s on t.a = s.a limit 1") } - -func TestIssue18631(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int, c int, d int, primary key(a,b,c))") - tk.MustExec("create table t2(a int, b int, c int, d int, primary key(a,b,c))") - tk.MustExec("insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3)") - tk.MustExec("insert into t2 values(1,1,1,1),(2,2,2,2)") - firstOperator := tk.MustQuery("explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Rows()[0][0].(string) - require.Equal(t, 0, strings.Index(firstOperator, plancodec.TypeIndexMergeJoin)) - tk.MustQuery("select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc").Check(testkit.Rows( - "3 3 3 3 ", - "2 2 2 2 2 2 2 2", - "1 1 1 1 1 1 1 1")) -} - -func TestIssue19408(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, primary key(c_int))") - tk.MustExec("create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (1), (2), (3), (4), (5)") - tk.MustExec("insert into t2 select * from t1") - tk.MustExec("begin") - tk.MustExec("delete from t1 where c_int = 1") - tk.MustQuery("select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int").Sort().Check(testkit.Rows( - "2 2", - "3 3", - "4 4", - "5 5")) - tk.MustExec("commit") -} - -func TestIssue20137(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (id bigint(20) unsigned, primary key(id))") - tk.MustExec("create table t2 (id bigint(20) unsigned)") - tk.MustExec("insert into t1 values (8738875760185212610)") - tk.MustExec("insert into t1 values (9814441339970117597)") - tk.MustExec("insert into t2 values (8738875760185212610)") - tk.MustExec("insert into t2 values (9814441339970117597)") - tk.MustQuery("select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id").Check( - testkit.Rows("8738875760185212610 8738875760185212610", "9814441339970117597 9814441339970117597")) -} - -func TestIndexJoinOnSinglePartitionTable(t *testing.T) { - // For issue 19145 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - for _, val := range []string{string(variable.Static), 
string(variable.Dynamic)} { - tk.MustExec("set @@tidb_partition_prune_mode= '" + val + "'") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue )") - tk.MustExec("insert into t1 values (1, 'Alice')") - tk.MustExec("insert into t2 values (1, 'Bob')") - tk.MustExec("analyze table t1, t2") - sql := "select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows := testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - // Partition table can't be inner side of index merge join, because it can't keep order. - require.Equal(t, -1, strings.Index(rows[0], "IndexMergeJoin")) - require.Equal(t, true, len(tk.MustQuery("show warnings").Rows()) > 0) - - sql = "select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - require.Equal(t, 0, strings.Index(rows[0], "IndexHashJoin")) - - sql = "select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str" - tk.MustQuery(sql).Check(testkit.Rows("1 Alice 1 Bob")) - rows = testdata.ConvertRowsToStrings(tk.MustQuery("explain format = 'brief' " + sql).Rows()) - require.Equal(t, 0, strings.Index(rows[0], "IndexJoin")) - } -} - -func TestIssue20400(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t, s") - tk.MustExec("create table s(a int, index(a))") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1)") - tk.MustQuery("select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check( - testkit.Rows("1 ")) - tk.MustQuery("select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1").Check( - testkit.Rows("1 ")) -} - -func TestIssue20549(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`));") - tk.MustExec("INSERT INTO `t1` VALUES (1,NULL);") - tk.MustExec("CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`));") - tk.MustQuery("SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;").Check( - testkit.Rows("1")) - tk.MustQuery("SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id;\n").Check( - testkit.Rows("1")) -} - -func TestIssue24473AndIssue25669(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists x, t2, t3") - tk.MustExec("CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") - tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") - tk.MustQuery("SELECT /*+ 
merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - - tk.MustExec("drop table if exists x, t2, t3") - tk.MustExec("CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`));") - tk.MustExec("insert into x values(\"x\"),(\"x\"),(\"b\"),(\"y\");") - tk.MustQuery("SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) - tk.MustQuery("SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a;").Sort().Check( - testkit.Rows("b b", "x x", "x x", "x x", "x x", "y y")) -} diff --git a/pkg/executor/index_merge_reader.go b/pkg/executor/index_merge_reader.go index 4d3a7c2f3065c..67e481bf43ec8 100644 --- a/pkg/executor/index_merge_reader.go +++ b/pkg/executor/index_merge_reader.go @@ -882,7 +882,7 @@ func handleWorkerPanic(ctx context.Context, finished, limitDone <-chan struct{}, extraNotifyCh <- true } - err4Panic := errors.Errorf("%s: %v", worker, r) + err4Panic := util.GetRecoverError(r) logutil.Logger(ctx).Error(err4Panic.Error()) doneCh := make(chan error, 1) doneCh <- err4Panic diff --git a/pkg/executor/infoschema_cluster_table_test.go b/pkg/executor/infoschema_cluster_table_test.go index 9d25bb7e9626b..f4050860e4125 100644 --- a/pkg/executor/infoschema_cluster_table_test.go +++ b/pkg/executor/infoschema_cluster_table_test.go @@ -176,7 +176,7 @@ func (s *infosSchemaClusterTableSuite) setUpMockPDHTTPServer() (*httptest.Server // TiDB/TiKV config. router.Handle("/config", fn.Wrap(mockConfig)) // PD region. - router.Handle("/pd/api/v1/stats/region", fn.Wrap(func() (*helper.PDRegionStats, error) { + router.Handle(pdapi.RegionStats, fn.Wrap(func() (*helper.PDRegionStats, error) { return &helper.PDRegionStats{ Count: 1, EmptyCount: 1, diff --git a/pkg/executor/infoschema_reader_test.go b/pkg/executor/infoschema_reader_test.go index de8480dfac90a..4412db2021a52 100644 --- a/pkg/executor/infoschema_reader_test.go +++ b/pkg/executor/infoschema_reader_test.go @@ -81,217 +81,6 @@ func TestInspectionTables(t *testing.T) { tk.Session().GetSessionVars().InspectionTableCache = nil } -func TestProfiling(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.profiling").Check(testkit.Rows()) - tk.MustExec("set @@profiling=1") - tk.MustQuery("select * from information_schema.profiling").Check(testkit.Rows("0 0 0 0 0 0 0 0 0 0 0 0 0 0 0")) -} - -func TestSchemataTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustQuery("select * from information_schema.SCHEMATA where schema_name='mysql';").Check( - testkit.Rows("def mysql utf8mb4 utf8mb4_bin ")) - - // Test the privilege of new user for information_schema.schemata. 
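In the infoschema_cluster_table_test.go hunk above, the hard-coded "/pd/api/v1/stats/region" route gives way to the pdapi.RegionStats constant, so the mock PD server and the real client share one definition. A minimal sketch of the pattern, assuming a hypothetical constant with the same value:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// regionStatsPath is a hypothetical constant mirroring pdapi.RegionStats;
// a single definition keeps the test server and every caller in sync.
const regionStatsPath = "/pd/api/v1/stats/region"

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc(regionStatsPath, func(w http.ResponseWriter, _ *http.Request) {
		fmt.Fprint(w, `{"count":1,"empty_count":1}`)
	})
	srv := httptest.NewServer(mux)
	defer srv.Close()

	resp, err := http.Get(srv.URL + regionStatsPath)
	if err == nil {
		defer resp.Body.Close()
		fmt.Println(resp.Status) // 200 OK against the shared route
	}
}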
- tk.MustExec("create user schemata_tester") - schemataTester := testkit.NewTestKit(t, store) - schemataTester.MustExec("use information_schema") - require.NoError(t, schemataTester.Session().Auth(&auth.UserIdentity{ - Username: "schemata_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - schemataTester.MustQuery("select count(*) from information_schema.SCHEMATA;").Check(testkit.Rows("1")) - schemataTester.MustQuery("select * from information_schema.SCHEMATA where schema_name='mysql';").Check( - [][]interface{}{}) - schemataTester.MustQuery("select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA';").Check( - testkit.Rows("def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin ")) - - // Test the privilege of user with privilege of mysql for information_schema.schemata. - tk.MustExec("CREATE ROLE r_mysql_priv;") - tk.MustExec("GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv;") - tk.MustExec("GRANT r_mysql_priv TO schemata_tester;") - schemataTester.MustExec("set role r_mysql_priv") - schemataTester.MustQuery("select count(*) from information_schema.SCHEMATA;").Check(testkit.Rows("2")) - schemataTester.MustQuery("select * from information_schema.SCHEMATA;").Check( - testkit.Rows("def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin ", "def mysql utf8mb4 utf8mb4_bin ")) -} - -func TestTableIDAndIndexID(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop table if exists test.t") - tk.MustExec("create table test.t (a int, b int, primary key(a), key k1(b))") - tk.MustQuery("select index_id from information_schema.tidb_indexes where table_schema = 'test' and table_name = 't'").Check(testkit.Rows("0", "1")) - tblID, err := strconv.Atoi(tk.MustQuery("select tidb_table_id from information_schema.tables where table_schema = 'test' and table_name = 't'").Rows()[0][0].(string)) - require.NoError(t, err) - require.Greater(t, tblID, 0) -} - -func TestSchemataCharacterSet(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'") - tk.MustQuery("select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'").Check( - testkit.Rows("utf8mb4 utf8mb4_bin")) - tk.MustExec("drop database `foo`") -} - -func TestViews(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE DEFINER='root'@'localhost' VIEW test.v1 AS SELECT 1") - tk.MustQuery("select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'").Check(testkit.Rows("1", "1")) - tk.MustQuery("SELECT * FROM information_schema.views WHERE table_schema='test' AND table_name='v1'").Check(testkit.Rows("def test v1 SELECT 1 AS `1` CASCADED NO root@localhost DEFINER utf8mb4 utf8mb4_bin")) - tk.MustQuery("SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='test' AND table_name='v1'").Check(testkit.Rows("def test v1 VIEW VIEW")) -} - -func TestColumnsTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (bit bit(10) DEFAULT b'100')") - tk.MustQuery("SELECT * 
FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't'").Check(testkit.Rows( - "def test t bit 1 b'100' YES bit 10 0 bit(10) select,insert,update,references ")) - tk.MustExec("drop table if exists t") - - tk.MustExec("set time_zone='+08:00'") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000')") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("1970-01-01 08:00:01.000")) - tk.MustExec("set time_zone='+04:00'") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("1970-01-01 04:00:01.000")) - tk.MustExec("set time_zone=default") - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a bit DEFAULT (rand()))") - tk.MustQuery("select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='test';").Check(testkit.Rows("rand()")) - - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ;") - tk.MustQuery("select column_type from information_schema.columns where TABLE_SCHEMA = 'test' and TABLE_NAME = 't';"). - Check(testkit.Rows("bit(1)", "year(4)")) - - // For issue: https://github.com/pingcap/tidb/issues/43379 - tk.MustQuery("select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'"). - Check(testkit.Rows("2")) -} - -func TestEngines(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.ENGINES;").Check(testkit.Rows("InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES")) -} - -// https://github.com/pingcap/tidb/issues/25467. 
-func TestDataTypesMaxLengthAndOctLength(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test_oct_length;") - tk.MustExec("create database test_oct_length;") - tk.MustExec("use test_oct_length;") - - testCases := []struct { - colTp string - maxLen int - octLen int - }{ - {"varchar(255) collate ascii_bin", 255, 255}, - {"varchar(255) collate utf8mb4_bin", 255, 255 * 4}, - {"varchar(255) collate utf8_bin", 255, 255 * 3}, - {"char(10) collate ascii_bin", 10, 10}, - {"char(10) collate utf8mb4_bin", 10, 10 * 4}, - {"set('a', 'b', 'cccc') collate ascii_bin", 8, 8}, - {"set('a', 'b', 'cccc') collate utf8mb4_bin", 8, 8 * 4}, - {"enum('a', 'b', 'cccc') collate ascii_bin", 4, 4}, - {"enum('a', 'b', 'cccc') collate utf8mb4_bin", 4, 4 * 4}, - } - for _, tc := range testCases { - createSQL := fmt.Sprintf("create table t (a %s);", tc.colTp) - tk.MustExec(createSQL) - result := tk.MustQuery("select character_maximum_length, character_octet_length " + - "from information_schema.columns " + - "where table_schema=(select database()) and table_name='t';") - expectedRows := testkit.Rows(fmt.Sprintf("%d %d", tc.maxLen, tc.octLen)) - result.Check(expectedRows) - tk.MustExec("drop table t;") - } -} - -func TestDDLJobs(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create database if not exists test_ddl_jobs") - tk.MustQuery("select db_name, job_type from information_schema.DDL_JOBS limit 1").Check( - testkit.Rows("test_ddl_jobs create schema")) - - tk.MustExec("use test_ddl_jobs") - tk.MustExec("create table t (a int);") - tk.MustQuery("select db_name, table_name, job_type from information_schema.DDL_JOBS where table_name = 't'").Check( - testkit.Rows("test_ddl_jobs t create table")) - - tk.MustQuery("select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'").Check( - testkit.Rows("create table")) - - // Test the START_TIME and END_TIME field. - tk.MustQuery("select distinct job_type from information_schema.DDL_JOBS where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s')").Check( - testkit.Rows("create table")) - - // Test the privilege of new user for information_schema.DDL_JOBS. - tk.MustExec("create user DDL_JOBS_tester") - DDLJobsTester := testkit.NewTestKit(t, store) - DDLJobsTester.MustExec("use information_schema") - require.NoError(t, DDLJobsTester.Session().Auth(&auth.UserIdentity{ - Username: "DDL_JOBS_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - - // Test the privilege of user for information_schema.ddl_jobs. 
- DDLJobsTester.MustQuery("select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't';").Check( - [][]interface{}{}) - tk.MustExec("CREATE ROLE r_priv;") - tk.MustExec("GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv;") - tk.MustExec("GRANT r_priv TO DDL_JOBS_tester;") - DDLJobsTester.MustExec("set role r_priv") - DDLJobsTester.MustQuery("select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't';").Check( - testkit.Rows("test_ddl_jobs t")) - - tk.MustExec("create table tt (a int);") - tk.MustExec("alter table tt add index t(a), add column b int") - tk.MustQuery("select db_name, table_name, job_type from information_schema.DDL_JOBS limit 3").Check( - testkit.Rows("test_ddl_jobs tt alter table multi-schema change", "test_ddl_jobs tt add column /* subjob */", "test_ddl_jobs tt add index /* subjob */ /* txn-merge */")) -} - -func TestKeyColumnUsage(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id';").Check( - testkit.Rows("def mysql tbl def mysql stats_meta table_id 1 ")) - - // test the privilege of new user for information_schema.table_constraints - tk.MustExec("create user key_column_tester") - keyColumnTester := testkit.NewTestKit(t, store) - keyColumnTester.MustExec("use information_schema") - require.NoError(t, keyColumnTester.Session().Auth(&auth.UserIdentity{ - Username: "key_column_tester", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - keyColumnTester.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY';").Check([][]interface{}{}) - - // test the privilege of user with privilege of mysql.gc_delete_range for information_schema.table_constraints - tk.MustExec("CREATE ROLE r_stats_meta ;") - tk.MustExec("GRANT ALL PRIVILEGES ON mysql.stats_meta TO r_stats_meta;") - tk.MustExec("GRANT r_stats_meta TO key_column_tester;") - keyColumnTester.MustExec("set role r_stats_meta") - rows := keyColumnTester.MustQuery("select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta';").Rows() - require.Greater(t, len(rows), 0) -} - func TestUserPrivileges(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -359,29 +148,6 @@ func TestUserPrivileges(t *testing.T) { require.Greater(t, len(rows), 0) } -func TestUserPrivilegesTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk1 := testkit.NewTestKit(t, store) - - // test the privilege of new user for information_schema.user_privileges - tk.MustExec("create user usageuser") - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{ - Username: "usageuser", - Hostname: "127.0.0.1", - }, nil, nil, nil)) - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def USAGE NO")) - // the usage row disappears when there is a non-dynamic privilege added - tk1.MustExec("GRANT SELECT ON *.* to usageuser") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def SELECT NO")) - // test grant privilege - tk1.MustExec("GRANT SELECT ON *.* to usageuser WITH GRANT OPTION") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE 
grantee="'usageuser'@'%'"`).Check(testkit.Rows("'usageuser'@'%' def SELECT YES")) - // test DYNAMIC privs - tk1.MustExec("GRANT BACKUP_ADMIN ON *.* to usageuser") - tk.MustQuery(`SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type`).Check(testkit.Rows("'usageuser'@'%' def BACKUP_ADMIN NO", "'usageuser'@'%' def SELECT YES")) -} - func TestDataForTableStatsField(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) h := dom.StatsHandle() @@ -504,52 +270,6 @@ func TestPartitionsTable(t *testing.T) { tk.MustExec("drop table test_partitions") } -// https://github.com/pingcap/tidb/issues/32693. -func TestPartitionTablesStatsCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.MustExec(` -CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE (id) ( - PARTITION p0 VALUES LESS THAN (50), - PARTITION p1 VALUES LESS THAN (100), - PARTITION p2 VALUES LESS THAN (150), - PARTITION p3 VALUES LESS THAN (MAXVALUE));`) - tk.MustExec(`CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30));`) - // Load the stats cache. - tk.MustQuery(`SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e';`) - // p0: 1 row, p3: 3 rows - tk.MustExec(`INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black");`) - tk.MustExec(`set tidb_enable_exchange_partition='on';`) - tk.MustExec(`ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2;`) - // p0: 1 rows, p3: 3 rows - tk.MustExec(`INSERT INTO e VALUES (41, "Michael", "Green");`) - tk.MustExec(`analyze table e;`) // The stats_meta should be effective immediately. - tk.MustQuery(`SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e';`). - Check(testkit.Rows("p0 1", "p1 0", "p2 0", "p3 3")) -} - -func TestMetricTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use information_schema") - tk.MustQuery("select count(*) > 0 from `METRICS_TABLES`").Check(testkit.Rows("1")) - tk.MustQuery("select * from `METRICS_TABLES` where table_name='tidb_qps'"). 
- Check(testkit.RowsWithSep("|", "tidb_qps|sum(rate(tidb_server_query_total{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (result,type,instance)|instance,type,result|0|TiDB query processing numbers per second")) -} - -func TestTableConstraintsTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range';").Check(testkit.Rows("def mysql delete_range_index mysql gc_delete_range UNIQUE")) -} - -func TestTableSessionVar(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit';").Check(testkit.Rows("tidb_retry_limit 10")) -} - func TestForAnalyzeStatus(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -640,19 +360,6 @@ func TestForServersInfo(t *testing.T) { require.Equal(t, stringutil.BuildStringFromLabels(info.Labels), rows[0][8]) } -func TestSequences(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE SEQUENCE test.seq maxvalue 10000000") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq'").Check(testkit.Rows("def test seq 1 1000 0 1 10000000 1 1 ")) - tk.MustExec("DROP SEQUENCE test.seq") - tk.MustExec("CREATE SEQUENCE test.seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq'").Check(testkit.Rows("def test seq 1 10 0 1 10 -1 -1 ")) - tk.MustExec("CREATE SEQUENCE test.seq2 start = -9 minvalue -10 maxvalue 10 increment -1 cache 15") - tk.MustQuery("SELECT * FROM information_schema.sequences WHERE sequence_schema='test' AND sequence_name='seq2'").Check(testkit.Rows("def test seq2 1 15 0 -1 10 -10 -9 ")) - tk.MustQuery("SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2'").Check(testkit.Rows("def test seq2 SEQUENCE InnoDB 1")) -} - func TestTiFlashSystemTableWithTiFlashV620(t *testing.T) { instances := []string{ "tiflash,127.0.0.1:3933,127.0.0.1:7777,,", @@ -770,21 +477,6 @@ func TestTiFlashSystemTableWithTiFlashV640(t *testing.T) { tk.MustQuery("show warnings").Check(testkit.Rows()) } -func TestTablesPKType(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t_int (a int primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_int'").Check(testkit.Rows("CLUSTERED")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - tk.MustExec("create table t_implicit (a varchar(64) primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_implicit'").Check(testkit.Rows("NONCLUSTERED")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("create table t_common (a varchar(64) primary key, b int)") - tk.MustQuery("SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'test' and table_name = 't_common'").Check(testkit.Rows("CLUSTERED")) - tk.MustQuery("SELECT TIDB_PK_TYPE FROM 
information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'").Check(testkit.Rows("NONCLUSTERED")) -} - // https://github.com/pingcap/tidb/issues/32459. func TestJoinSystemTableContainsView(t *testing.T) { store := testkit.CreateMockStore(t) @@ -860,16 +552,6 @@ func TestShowColumnsWithSubQueryView(t *testing.T) { require.NoError(t, failpoint.Disable("tikvclient/tikvStoreSendReqResult")) } -func TestNullColumns(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("CREATE TABLE t ( id int DEFAULT NULL);") - tk.MustExec("CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`;") - tk.MustQuery("select * from information_schema.columns where TABLE_SCHEMA = 'test' and TABLE_NAME = 'v_test';"). - Check(testkit.Rows("def test v_test type 1 YES binary 0 0 binary(0) select,insert,update,references ")) -} - // Code below are helper utilities for the test cases. type getTiFlashSystemTableRequestMocker struct { diff --git a/pkg/executor/insert_common.go b/pkg/executor/insert_common.go index f7f1c48d525ae..9a2d4d25ceef3 100644 --- a/pkg/executor/insert_common.go +++ b/pkg/executor/insert_common.go @@ -1368,7 +1368,7 @@ func (e *InsertValues) equalDatumsAsBinary(a []types.Datum, b []types.Datum) (bo return false, nil } for i, ai := range a { - v, err := ai.Compare(e.Ctx().GetSessionVars().StmtCtx, &b[i], collate.GetBinaryCollator()) + v, err := ai.Compare(e.Ctx().GetSessionVars().StmtCtx.TypeCtx(), &b[i], collate.GetBinaryCollator()) if err != nil { return false, errors.Trace(err) } diff --git a/pkg/executor/insert_test.go b/pkg/executor/insert_test.go index 69376dc386999..523f0ebfcdd1a 100644 --- a/pkg/executor/insert_test.go +++ b/pkg/executor/insert_test.go @@ -16,9 +16,7 @@ package executor_test import ( "fmt" - "math" "strconv" - "strings" "testing" "time" @@ -256,407 +254,6 @@ func testInsertOnDuplicateKey(t *testing.T, tk *testkit.TestKit) { tk.MustQuery(`select * from t1 use index(primary)`).Check(testkit.Rows(`1.0000`)) } -func TestClusterIndexInsertOnDuplicateKey(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists cluster_index_duplicate_entry_error;") - tk.MustExec("create database cluster_index_duplicate_entry_error;") - tk.MustExec("use cluster_index_duplicate_entry_error;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("create table t(a char(20), b int, primary key(a));") - tk.MustExec("insert into t values('aa', 1), ('bb', 1);") - tk.MustMatchErrMsg("insert into t values('aa', 2);", ".*Duplicate entry 'aa' for.*") - - tk.MustExec("drop table t;") - tk.MustExec("create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c));") - tk.MustExec("insert into t values ('a', 'b', 'c'), ('b', 'a', 'c');") - tk.MustMatchErrMsg("insert into t values ('a', 'b', 'c');", ".*Duplicate entry 'a-b-c' for.*") -} - -func TestPaddingCommonHandle(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("drop table if exists t1;") - tk.MustExec(`create table t1(c1 decimal(6,4), primary key(c1))`) - tk.MustExec(`insert into t1 set c1 = 0.1`) - tk.MustExec(`insert into t1 set c1 = 0.1 on duplicate key update c1 = 
1`) - tk.MustQuery(`select * from t1`).Check(testkit.Rows(`1.0000`)) -} - -func TestInsertReorgDelete(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - inputs := []struct { - typ string - dat string - }{ - {"year", "'2004'"}, - {"year", "2004"}, - {"bit", "1"}, - {"smallint unsigned", "1"}, - {"int unsigned", "1"}, - {"smallint", "-1"}, - {"int", "-1"}, - {"decimal(6,4)", "'1.1'"}, - {"decimal", "1.1"}, - {"numeric", "-1"}, - {"float", "1.2"}, - {"double", "1.2"}, - {"double", "1.3"}, - {"real", "1.4"}, - {"date", "'2020-01-01'"}, - {"time", "'20:00:00'"}, - {"datetime", "'2020-01-01 22:22:22'"}, - {"timestamp", "'2020-01-01 22:22:22'"}, - {"year", "'2020'"}, - {"char(15)", "'test'"}, - {"varchar(15)", "'test'"}, - {"binary(3)", "'a'"}, - {"varbinary(3)", "'b'"}, - {"blob", "'test'"}, - {"text", "'test'"}, - {"enum('a', 'b')", "'a'"}, - {"set('a', 'b')", "'a,b'"}, - } - - for _, i := range inputs { - tk.MustExec(`drop table if exists t1`) - tk.MustExec(fmt.Sprintf(`create table t1(c1 %s)`, i.typ)) - tk.MustExec(fmt.Sprintf(`insert into t1 set c1 = %s`, i.dat)) - switch i.typ { - case "blob", "text": - tk.MustExec(`alter table t1 add index idx(c1(3))`) - default: - tk.MustExec(`alter table t1 add index idx(c1)`) - } - tk.MustExec(`delete from t1`) - tk.MustExec(`admin check table t1`) - } -} - -func TestUpdateDuplicateKey(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table c(i int,j int,k int,primary key(i,j,k));`) - tk.MustExec(`insert into c values(1,2,3);`) - tk.MustExec(`insert into c values(1,2,4);`) - tk.MustGetErrMsg(`update c set i=1,j=2,k=4 where i=1 and j=2 and k=3;`, - "[kv:1062]Duplicate entry '1-2-4' for key 'c.PRIMARY'") -} - -func TestIssue37187(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists a, b") - tk.MustExec("create table t1 (a int(11) ,b varchar(100) ,primary key (a));") - tk.MustExec("create table t2 (c int(11) ,d varchar(100) ,primary key (c));") - tk.MustExec("prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d';") - err := tk.ExecToErr("execute in1;") - require.NoError(t, err) -} - -func TestInsertWrongValueForField(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a bigint);`) - tk.MustGetErrCode(`insert into t1 values("asfasdfsajhlkhlksdaf");`, errno.ErrTruncatedWrongValueForField) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a varchar(10)) charset ascii;`) - tk.MustGetErrCode(`insert into t1 values('我');`, errno.ErrTruncatedWrongValueForField) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a char(10) charset utf8);`) - tk.MustExec(`insert into t1 values('我');`) - tk.MustExec(`alter table t1 add column b char(10) charset ascii as ((a));`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows("我 ?")) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t (a year);`) - tk.MustGetErrMsg(`insert into t values(2156);`, - "[types:1264]Out of range value for column 'a' at row 1") - - tk.MustExec(`DROP TABLE IF EXISTS ts`) - tk.MustExec(`CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL)`) 
- tk.MustExec(`SET @@sql_mode=''`) - tk.MustExec(`INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00')`) - tk.MustQuery(`SHOW WARNINGS`).Check(testkit.Rows(`Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1`)) - tk.MustQuery(`SELECT * FROM ts ORDER BY id`).Check(testkit.Rows(`1 0000-00-00 00:00:00`)) - - tk.MustExec(`SET @@sql_mode='STRICT_TRANS_TABLES'`) - tk.MustGetErrMsg(`INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00')`, `[table:1292]Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1`) - tk.MustExec(`DROP TABLE ts`) - - tk.MustExec(`CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY);`) - tk.MustExec(`INSERT IGNORE INTO t0(c0) VALUES (194626268);`) - tk.MustExec(`INSERT IGNORE INTO t0(c0) VALUES ('*')`) - tk.MustQuery(`SHOW WARNINGS`).Check(testkit.Rows( - `Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1`, - `Warning 1690 constant 32768 overflows smallint`, - `Warning 1467 Failed to read auto-increment value from storage engine`)) -} - -func TestInsertValueForCastDecimalField(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a decimal(15,2));`) - tk.MustExec(`insert into t1 values (1111111111111.01);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`1111111111111.01`)) - tk.MustQuery(`select cast(a as decimal) from t1;`).Check(testkit.Rows(`9999999999`)) -} - -func TestInsertForMultiValuedIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a json, b int, unique index idx((cast(a as signed array))));`) - tk.MustExec(`insert into t1 values ('[1,11]', 1);`) - tk.MustExec(`insert into t1 values ('[2, 22]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustGetErrMsg(`insert into t1 values ('[2, 222]', 2);`, "[kv:1062]Duplicate entry '2' for key 't1.idx'") - tk.MustExec(`replace into t1 values ('[1, 10]', 10)`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[2, 22] 2`, `[1, 10] 10`)) - tk.MustExec(`replace into t1 values ('[1, 2]', 1)`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 2] 1`)) - tk.MustExec(`replace into t1 values ('[1, 11]', 1)`) - tk.MustExec(`insert into t1 values ('[2, 22]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert ignore into t1 values ('[1]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert ignore into t1 values ('[1, 2]', 2);`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 2`)) - tk.MustExec(`insert into t1 values ('[2]', 2) on duplicate key update b = 10;`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows(`[1, 11] 1`, `[2, 22] 10`)) - tk.MustGetErrMsg(`insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") - tk.MustGetErrMsg(`insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") - tk.MustGetErrMsg(`insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]';`, "[kv:1062]Duplicate entry '[1, 2]' for key 't1.idx'") -} - -func TestInsertDateTimeWithTimeZone(t 
*testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec(`use test;`) - tk.MustExec(`set time_zone="+09:00";`) - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP);`) - tk.MustExec(`set TIMESTAMP = 1234;`) - tk.MustExec(`insert t (id) values (1);`) - - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `1 1970-01-01 09:20:34`, - )) - - // test for ambiguous cases - cases := []struct { - lit string - expect string - }{ - {"2020-10-22", "2020-10-22 00:00:00"}, - {"2020-10-22-16", "2020-10-22 16:00:00"}, - {"2020-10-22 16-31", "2020-10-22 16:31:00"}, - {"2020-10-22 16:31-15", "2020-10-22 16:31:15"}, - {"2020-10-22T16:31:15-10", "2020-10-23 10:31:15"}, - - {"2020.10-22", "2020-10-22 00:00:00"}, - {"2020-10.22-16", "2020-10-22 16:00:00"}, - {"2020-10-22.16-31", "2020-10-22 16:31:00"}, - {"2020-10-22 16.31-15", "2020-10-22 16:31:15"}, - {"2020-10-22T16.31.15+14", "2020-10-22 10:31:15"}, - - {"2020-10:22", "2020-10-22 00:00:00"}, - {"2020-10-22:16", "2020-10-22 16:00:00"}, - {"2020-10-22-16:31", "2020-10-22 16:31:00"}, - {"2020-10-22 16-31:15", "2020-10-22 16:31:15"}, - {"2020-10-22T16.31.15+09:30", "2020-10-22 15:01:15"}, - - {"2020.10-22:16", "2020-10-22 16:00:00"}, - {"2020-10.22-16:31", "2020-10-22 16:31:00"}, - {"2020-10-22.16-31:15", "2020-10-22 16:31:15"}, - {"2020-10-22T16:31.15+09:30", "2020-10-22 15:01:15"}, - } - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (dt datetime)`) - tk.MustExec(`set @@time_zone='+08:00'`) - for _, ca := range cases { - tk.MustExec(`delete from t`) - tk.MustExec(fmt.Sprintf("insert into t values ('%s')", ca.lit)) - tk.MustQuery(`select * from t`).Check(testkit.Rows(ca.expect)) - } - - // test for time zone change - tzcCases := []struct { - tz1 string - lit string - tz2 string - exp1 string - exp2 string - }{ - {"+08:00", "2020-10-22T16:53:40Z", "+00:00", "2020-10-23 00:53:40", "2020-10-22 16:53:40"}, - {"-08:00", "2020-10-22T16:53:40Z", "+08:00", "2020-10-22 08:53:40", "2020-10-23 00:53:40"}, - {"-03:00", "2020-10-22T16:53:40+03:00", "+08:00", "2020-10-22 10:53:40", "2020-10-22 21:53:40"}, - {"+08:00", "2020-10-22T16:53:40+08:00", "+08:00", "2020-10-22 16:53:40", "2020-10-22 16:53:40"}, - } - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (dt datetime, ts timestamp)") - for _, ca := range tzcCases { - tk.MustExec("delete from t") - tk.MustExec(fmt.Sprintf("set @@time_zone='%s'", ca.tz1)) - tk.MustExec(fmt.Sprintf("insert into t values ('%s', '%s')", ca.lit, ca.lit)) - tk.MustExec(fmt.Sprintf("set @@time_zone='%s'", ca.tz2)) - tk.MustQuery("select * from t").Check(testkit.Rows(ca.exp1 + " " + ca.exp2)) - } - - // test for datetime in compare - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (ts timestamp)") - tk.MustExec("insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z')") - tk.MustQuery("select count(*) from t where ts > '2020-10-22T12:00:00Z'").Check(testkit.Rows("2")) - - // test for datetime with fsp - fspCases := []struct { - fsp uint - lit string - exp1 string - exp2 string - }{ - {2, "2020-10-27T14:39:10.10+00:00", "2020-10-27 22:39:10.10", "2020-10-27 22:39:10.10"}, - {1, "2020-10-27T14:39:10.3+0200", "2020-10-27 20:39:10.3", "2020-10-27 20:39:10.3"}, - {6, "2020-10-27T14:39:10.3-02", "2020-10-28 00:39:10.300000", "2020-10-28 00:39:10.300000"}, - {2, "2020-10-27T14:39:10.10Z", "2020-10-27 22:39:10.10", "2020-10-27 
22:39:10.10"}, - } - - tk.MustExec("set @@time_zone='+08:00'") - for _, ca := range fspCases { - tk.MustExec("drop table if exists t") - tk.MustExec(fmt.Sprintf("create table t (dt datetime(%d), ts timestamp(%d))", ca.fsp, ca.fsp)) - tk.MustExec(fmt.Sprintf("insert into t values ('%s', '%s')", ca.lit, ca.lit)) - tk.MustQuery("select * from t").Check(testkit.Rows(ca.exp1 + " " + ca.exp2)) - } -} - -func TestInsertZeroYear(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`create table t1(a year(4));`) - tk.MustExec(`insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79");`) - tk.MustQuery(`select * from t1;`).Check(testkit.Rows( - `0`, - `0`, - `0`, - `2000`, - `2000`, - `2000`, - `1979`, - `1979`, - )) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;`) - tk.MustExec(`insert into t values();`) - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `0`, - )) - tk.MustExec(`insert into t values('0000');`) - tk.MustQuery(`select * from t;`).Check(testkit.Rows( - `0`, - `0`, - )) -} - -func TestAllowInvalidDates(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t1, t2, t3, t4;`) - tk.MustExec(`create table t1(d date);`) - tk.MustExec(`create table t2(d datetime);`) - tk.MustExec(`create table t3(d date);`) - tk.MustExec(`create table t4(d datetime);`) - - runWithMode := func(mode string) { - inputs := []string{"0000-00-00", "2019-00-00", "2019-01-00", "2019-00-01", "2019-02-31"} - results := testkit.Rows(`0 0 0`, `2019 0 0`, `2019 1 0`, `2019 0 1`, `2019 2 31`) - oldMode := tk.MustQuery(`select @@sql_mode`).Rows()[0][0] - defer func() { - tk.MustExec(fmt.Sprintf(`set sql_mode='%s'`, oldMode)) - }() - - tk.MustExec(`truncate t1;truncate t2;truncate t3;truncate t4;`) - tk.MustExec(fmt.Sprintf(`set sql_mode='%s';`, mode)) - for _, input := range inputs { - tk.MustExec(fmt.Sprintf(`insert into t1 values ('%s')`, input)) - tk.MustExec(fmt.Sprintf(`insert into t2 values ('%s')`, input)) - } - tk.MustQuery(`select year(d), month(d), day(d) from t1;`).Check(results) - tk.MustQuery(`select year(d), month(d), day(d) from t2;`).Check(results) - tk.MustExec(`insert t3 select d from t1;`) - tk.MustQuery(`select year(d), month(d), day(d) from t3;`).Check(results) - tk.MustExec(`insert t4 select d from t2;`) - tk.MustQuery(`select year(d), month(d), day(d) from t4;`).Check(results) - } - - runWithMode("STRICT_TRANS_TABLES,ALLOW_INVALID_DATES") - runWithMode("ALLOW_INVALID_DATES") -} - -func TestPartitionInsertOnDuplicate(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000))`) - tk.MustExec(`insert into t1 set a=1, b=1`) - tk.MustExec(`insert into t1 set a=1,b=1 on duplicate key update a=1,b=1`) - tk.MustQuery(`select * from t1`).Check(testkit.Rows("1 1")) - - tk.MustExec(`create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4`) - tk.MustExec(`insert into t2 set a=1,b=1;`) - tk.MustExec(`insert into t2 set a=1,b=1 on duplicate key update a=1,b=1`) - tk.MustQuery(`select * from t2`).Check(testkit.Rows("1 
1")) - - tk.MustExec(`CREATE TABLE t3 (a int, b int, c int, d int, e int, - PRIMARY KEY (a,b), - UNIQUE KEY (b,c,d) -) PARTITION BY RANGE ( b ) ( - PARTITION p0 VALUES LESS THAN (4), - PARTITION p1 VALUES LESS THAN (7), - PARTITION p2 VALUES LESS THAN (11) -)`) - tk.MustExec("insert into t3 values (1,2,3,4,5)") - tk.MustExec("insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e)") - tk.MustQuery("select * from t3").Check(testkit.Rows("1 2 3 4 16")) -} - -func TestBit(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t1 (a bit(3))`) - tk.MustMatchErrMsg("insert into t1 values(-1)", ".*Data too long for column 'a' at.*") - tk.MustMatchErrMsg("insert into t1 values(9)", ".*Data too long for column 'a' at.*") - tk.MustExec(`create table t64 (a bit(64))`) - tk.MustExec("insert into t64 values(-1)") - tk.MustExec("insert into t64 values(18446744073709551615)") // 2^64 - 1 - tk.MustMatchErrMsg("insert into t64 values(18446744073709551616)", ".*Out of range value for column 'a' at.*") // z^64 -} - func TestAllocateContinuousRowID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -694,92 +291,6 @@ func TestAllocateContinuousRowID(t *testing.T) { wg.Wait() } -func TestJiraIssue5366(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table bug (a varchar(100))`) - tk.MustExec(` insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no;`) - tk.MustQuery(`select * from bug`).Sort().Check(testkit.Rows("20180531557", "20190430140319679394")) -} - -func TestDMLCast(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t (a int, b double)`) - tk.MustExec(`insert into t values (ifnull('',0)+0, 0)`) - tk.MustExec(`insert into t values (0, ifnull('',0)+0)`) - tk.MustQuery(`select * from t`).Check(testkit.Rows("0 0", "0 0")) - tk.MustExecToErr(`insert into t values ('', 0)`) - tk.MustExecToErr(`insert into t values (0, '')`) - tk.MustExecToErr(`update t set a = ''`) - tk.MustExecToErr(`update t set b = ''`) - tk.MustExec("update t set a = ifnull('',0)+0") - tk.MustExec("update t set b = ifnull('',0)+0") - tk.MustExec("delete from t where a = ''") - tk.MustQuery(`select * from t`).Check(testkit.Rows()) -} - -func TestInsertFloatOverflow(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t,t1;`) - tk.MustExec("create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2));") - tk.MustGetErrMsg("insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);", - "[types:1264]Out of range value for column 'col1' at row 1") - tk.MustGetErrMsg("insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);", - "[types:1264]Out of range value for column 'col2' at row 1") - tk.MustExec("create table t1(id1 float,id2 float)") - tk.MustExec("insert ignore into t1 
values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999)") - tk.MustQuery("select @@warning_count").Check(testkit.RowsWithSep("|", "2")) - tk.MustQuery("select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1").Check(testkit.Rows("340282346638528860000000000000000000000 -340282346638528860000000000000000000000")) - tk.MustExec("drop table if exists t,t1") -} - -// Fix https://github.com/pingcap/tidb/issues/32601. -func TestTextTooLongError(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - // Set strict sql_mode - tk.MustExec("set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';") - - // For max_allowed_packet default value is big enough to ensure tinytext, text can test correctly. - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));", - "[types:1406]Data too long for column 'c1' at row 1") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));", - "[types:1406]Data too long for column 'c1' at row 1") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 mediumtext);") - tk.MustGetErrMsg("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215));", - "[types:1406]Data too long for column 'c1' at row 1") - - // For long text, max_allowed_packet default value can not allow 4GB package, skip the test case. - - // Set non strict sql_mode, we are not supposed to raise an error but to truncate the value. - tk.MustExec("set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';") - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);") - tk.MustExec("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));") - - tk.MustQuery(`select length(c1) from t1;`).Check(testkit.Rows("254")) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec("CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);") - tk.MustExec("INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));") - tk.MustQuery(`select length(c1) from t1;`).Check(testkit.Rows("65534")) - // For mediumtext or bigger size, for tikv limit, we will get:ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247, no need to test. 
-} - func TestAutoRandomID(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -892,201 +403,6 @@ func TestAutoRandomIDAllowZero(t *testing.T) { tk.MustExec(`drop table ar`) } - -func TestAutoRandomIDExplicit(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@allow_auto_random_explicit_insert = true") - - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists ar`) - tk.MustExec(`create table ar (id bigint key clustered auto_random, name char(10))`) - - tk.MustExec(`insert into ar(id) values (1)`) - tk.MustQuery(`select id from ar`).Check(testkit.Rows("1")) - tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows("0")) - tk.MustExec(`delete from ar`) - - tk.MustExec(`insert into ar(id) values (1), (2)`) - tk.MustQuery(`select id from ar`).Check(testkit.Rows("1", "2")) - tk.MustQuery(`select last_insert_id()`).Check(testkit.Rows("0")) - tk.MustExec(`delete from ar`) - - tk.MustExec(`drop table ar`) -} - -func TestInsertErrorMsg(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t (a int primary key, b datetime, d date)`) - tk.MustContainErrMsg(`insert into t values (1, '2019-02-11 30:00:00', '2019-01-31')`, - "Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1") - - // test for Issue #35289 - tk.MustExec("CREATE TABLE t1 (a BINARY(16) PRIMARY KEY);") - tk.MustExec(`INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));`) - err := tk.ExecToErr(`INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));`) - require.Error(t, err, `ERROR 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY'`) - - tk.MustExec(`INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));`) - err = tk.ExecToErr(`INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));`) - require.Error(t, err, "ERROR 1062 (23000): Duplicate entry '\\x0C\\x1E\\x8DG`\\xEB\\x93 F&BC\\xF0\\xB5\\xF4\\xB7' for key 't1.PRIMARY'") - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (a bit primary key) engine=innodb;") - tk.MustExec("insert into t1 values (b'0');") - err = tk.ExecToErr(`insert into t1 values (b'0');`) - require.Error(t, err, `ERROR 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY'`) -} - -func TestIssue16366(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test;`) - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(c numeric primary key);`) - tk.MustExec("insert ignore into t values(null);") - tk.MustContainErrMsg(`insert into t values(0);`, "Duplicate entry '0' for key 't.PRIMARY'") -} - -func TestClusterPrimaryTablePlainInsert(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists t1pk`) - tk.MustExec(`create table t1pk(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into t1pk(id, v) values('abc', 1)`) - tk.MustQuery(`select * from t1pk`).Check(testkit.Rows("abc 1")) - tk.MustExec(`set @@tidb_constraint_check_in_place=true`) - tk.MustGetErrCode(`insert into t1pk(id, v) values('abc', 2)`, errno.ErrDupEntry) - tk.MustExec(`set @@tidb_constraint_check_in_place=false`) - tk.MustGetErrCode(`insert into t1pk(id, v) values('abc', 3)`, errno.ErrDupEntry) - 
tk.MustQuery(`select v, id from t1pk`).Check(testkit.Rows("1 abc")) - tk.MustQuery(`select id from t1pk where id = 'abc'`).Check(testkit.Rows("abc")) - tk.MustQuery(`select v, id from t1pk where id = 'abc'`).Check(testkit.Rows("1 abc")) - - tk.MustExec(`drop table if exists t3pk`) - tk.MustExec(`create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3))`) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1)`) - tk.MustQuery(`select * from t3pk`).Check(testkit.Rows("abc xyz 1 100")) - tk.MustExec(`set @@tidb_constraint_check_in_place=true`) - tk.MustGetErrCode(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2)`, errno.ErrDupEntry) - tk.MustExec(`set @@tidb_constraint_check_in_place=false`) - tk.MustGetErrCode(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3)`, errno.ErrDupEntry) - tk.MustQuery(`select v, id3, id2, id1 from t3pk`).Check(testkit.Rows("1 100 xyz abc")) - tk.MustQuery(`select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'`).Check(testkit.Rows("100 xyz abc")) - tk.MustQuery(`select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'`).Check(testkit.Rows("100 xyz abc 1")) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1)`) - tk.MustExec(`insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1)`) - - tk.MustExec(`drop table if exists t1pku`) - tk.MustExec(`create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk))`) - tk.MustExec(`insert into t1pku(id, uk, v) values('abc', 1, 2)`) - tk.MustQuery(`select * from t1pku where id = 'abc'`).Check(testkit.Rows("abc 1 2")) - tk.MustGetErrCode(`insert into t1pku(id, uk, v) values('aaa', 1, 3)`, errno.ErrDupEntry) - tk.MustQuery(`select * from t1pku`).Check(testkit.Rows("abc 1 2")) - - tk.MustQuery(`select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101))`). 
- Check(testkit.Rows("abc xyz 1 100", "abc xyz 1 101", "abc zzz 1 101")) -} - -func TestClusterPrimaryTableInsertIgnore(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists it1pk`) - tk.MustExec(`create table it1pk(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into it1pk(id, v) values('abc', 1)`) - tk.MustExec(`insert ignore into it1pk(id, v) values('abc', 2)`) - tk.MustQuery(`select * from it1pk where id = 'abc'`).Check(testkit.Rows("abc 1")) - - tk.MustExec(`drop table if exists it2pk`) - tk.MustExec(`create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2))`) - tk.MustExec(`insert into it2pk(id1, id2, v) values('abc', 'cba', 1)`) - tk.MustQuery(`select * from it2pk where id1 = 'abc' and id2 = 'cba'`).Check(testkit.Rows("abc cba 1")) - tk.MustExec(`insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2)`) - tk.MustQuery(`select * from it2pk where id1 = 'abc' and id2 = 'cba'`).Check(testkit.Rows("abc cba 1")) - - tk.MustExec(`drop table if exists it1pku`) - tk.MustExec(`create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk))`) - tk.MustExec(`insert into it1pku(id, uk, v) values('abc', 1, 2)`) - tk.MustQuery(`select * from it1pku where id = 'abc'`).Check(testkit.Rows("abc 1 2")) - tk.MustExec(`insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1)`) - tk.MustQuery(`select * from it1pku`).Check(testkit.Rows("abc 1 2", "bbb 2 1")) -} - -func TestClusterPrimaryTableInsertDuplicate(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists dt1pi`) - tk.MustExec(`create table dt1pi(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 1),('acc', 2)`) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1`) - tk.MustQuery(`select * from dt1pi`).Check(testkit.Rows("abb 2", "acc 2")) - tk.MustExec(`insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'`) - tk.MustQuery(`select * from dt1pi`).Check(testkit.Rows("acc 2", "xxx 3")) - - tk.MustExec(`drop table if exists dt1piu`) - tk.MustExec(`create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk))`) - tk.MustExec(`insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20)`) - tk.MustExec(`insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1`) - tk.MustQuery(`select * from dt1piu`).Check(testkit.Rows("abb 1 11", "acc 2 20")) - tk.MustExec(`insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'`) - tk.MustQuery(`select * from dt1piu`).Check(testkit.Rows("acc 2 20", "xxx 1 12")) - - tk.MustExec(`drop table if exists ts1pk`) - tk.MustExec(`create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2))`) - ts := "2018-01-01 11:11:11" - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?)`, ts, ts, 1) - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:11 2018-01-01 11:11:11 1")) - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?) 
on duplicate key update v = values(v)`, ts, ts, 2) - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:11 2018-01-01 11:11:11 2")) - tk.MustExec(`insert into ts1pk (id1, id2, v) values(?, ?, ?) on duplicate key update v = values(v), id1 = ?`, ts, ts, 2, "2018-01-01 11:11:12") - tk.MustQuery(`select id1, id2, v from ts1pk`).Check(testkit.Rows("2018-01-01 11:11:12 2018-01-01 11:11:11 2")) -} - -func TestClusterPrimaryKeyForIndexScan(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("drop table if exists pkt1;") - tk.MustExec("CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b));") - tk.MustExec("insert into pkt1 values ('aaa',1);") - tk.MustQuery(`select b from pkt1 where b = 1;`).Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists pkt2;") - tk.MustExec("CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b));") - tk.MustExec("insert into pkt2 values ('aaa',1);") - tk.MustQuery(`select b from pkt2 where b = 1;`).Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists issue_18232;") - tk.MustExec("create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c));") - - iter, cnt := combination([]string{"a", "b", "c", "d"}), 0 - for { - comb := iter() - if comb == nil { - break - } - selField := strings.Join(comb, ",") - sql := fmt.Sprintf("select %s from issue_18232 use index (idx);", selField) - tk.MustExec(sql) - cnt++ - } - require.Equal(t, 15, cnt) -} - func TestInsertRuntimeStat(t *testing.T) { stats := &executor.InsertRuntimeStat{ BasicRuntimeStats: &execdetails.BasicRuntimeStats{}, @@ -1169,180 +485,6 @@ func TestDuplicateEntryMessage(t *testing.T) { } } -func TestIssue20768(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a year, primary key(a))") - tk.MustExec("insert ignore into t1 values(null)") - tk.MustExec("create table t2(a int, key(a))") - tk.MustExec("insert into t2 values(0)") - tk.MustQuery("select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) - tk.MustQuery("select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a").Check(testkit.Rows("0 0")) -} - -func TestIssue10402(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table vctt (v varchar(4), c char(4))") - tk.MustExec("insert into vctt values ('ab ', 'ab ')") - tk.MustQuery("select * from vctt").Check(testkit.Rows("ab ab")) - tk.MustExec("delete from vctt") - tk.Session().GetSessionVars().StmtCtx.SetWarnings(nil) - tk.MustExec("insert into vctt values ('ab\\n\\n\\n', 'ab\\n\\n\\n'), ('ab\\t\\t\\t', 'ab\\t\\t\\t'), ('ab ', 'ab '), ('ab\\r\\r\\r', 'ab\\r\\r\\r')") - require.Equal(t, uint16(4), tk.Session().GetSessionVars().StmtCtx.WarningCount()) 
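The 15 asserted above is 2^4 - 1: the combination iterator (defined just below) enumerates every non-empty subset of the four column names with a bitmask. A standalone sketch of the same enumeration (subsets is a hypothetical name):

package executor_test

// subsets enumerates every non-empty subset of items: masks 1 .. 2^n-1,
// where bit i of the mask selects items[i].
func subsets(items []string) [][]string {
	var out [][]string
	for mask := 1; mask < 1<<len(items); mask++ {
		cur := make([]string, 0, len(items))
		for i, e := range items {
			if mask&(1<<i) != 0 {
				cur = append(cur, e)
			}
		}
		out = append(out, cur)
	}
	return out // 4 items => 15 subsets, matching the count checked above
}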
- warns := tk.Session().GetSessionVars().StmtCtx.GetWarnings() - require.Equal(t, "[{Warning [types:1265]Data truncated for column 'v' at row 1} {Warning [types:1265]Data truncated for column 'v' at row 2} {Warning [types:1265]Data truncated for column 'v' at row 3} {Warning [types:1265]Data truncated for column 'v' at row 4}]", - fmt.Sprintf("%v", warns)) - tk.MustQuery("select * from vctt").Check(testkit.Rows("ab\n\n ab\n\n", "ab\t\t ab\t\t", "ab ab", "ab\r\r ab\r\r")) - tk.MustQuery("select length(v), length(c) from vctt").Check(testkit.Rows("4 4", "4 4", "4 2", "4 4")) -} - -func combination(items []string) func() []string { - current := 1 - buf := make([]string, len(items)) - return func() []string { - if current >= int(math.Pow(2, float64(len(items)))) { - return nil - } - buf = buf[:0] - for i, e := range items { - if (1< server index => row rows := map[string][][]string{} - for _, typ := range []string{"tidb", "tikv", "tiflash", "pd"} { + for _, typ := range []string{"tidb", "tikv", "tiflash", "tiproxy", "pd"} { for _, server := range testServers { rows[typ] = append(rows[typ], []string{ fmt.Sprintf("%s %s key1 value1", typ, server.address), @@ -247,7 +258,7 @@ func TestTiDBClusterConfig(t *testing.T) { }{ { sql: "select * from information_schema.cluster_config", - reqCount: 12, + reqCount: 15, rows: flatten( rows["tidb"][0], rows["tidb"][1], @@ -258,6 +269,9 @@ func TestTiDBClusterConfig(t *testing.T) { rows["tiflash"][0], rows["tiflash"][1], rows["tiflash"][2], + rows["tiproxy"][0], + rows["tiproxy"][1], + rows["tiproxy"][2], rows["pd"][0], rows["pd"][1], rows["pd"][2], @@ -277,11 +291,12 @@ func TestTiDBClusterConfig(t *testing.T) { }, { sql: "select * from information_schema.cluster_config where type='pd' or instance='" + testServers[0].address + "'", - reqCount: 12, + reqCount: 15, rows: flatten( rows["tidb"][0], rows["tikv"][0], rows["tiflash"][0], + rows["tiproxy"][0], rows["pd"][0], rows["pd"][1], rows["pd"][2], @@ -357,11 +372,12 @@ func TestTiDBClusterConfig(t *testing.T) { { sql: fmt.Sprintf(`select * from information_schema.cluster_config where instance='%s'`, testServers[0].address), - reqCount: 4, + reqCount: 5, rows: flatten( rows["tidb"][0], rows["tikv"][0], rows["tiflash"][0], + rows["tiproxy"][0], rows["pd"][0], ), }, @@ -502,6 +518,22 @@ func TestTiDBClusterLog(t *testing.T) { logtime(`2019/08/26 06:28:19.011`) + ` [critical] [test log message tikv 14, bar]`, }) + // TiProxy + writeTmpFile(t, testServers["tiproxy"].tmpDir, "tiproxy.log", []string{ + logtime(`2019/08/26 06:19:13.011`) + ` [INFO] [test log message tiproxy 1, foo]`, + logtime(`2019/08/26 06:20:14.011`) + ` [DEBUG] [test log message tiproxy 2, foo]`, + logtime(`2019/08/26 06:21:15.011`) + ` [error] [test log message tiproxy 3, foo]`, + logtime(`2019/08/26 06:22:16.011`) + ` [trace] [test log message tiproxy 4, foo]`, + logtime(`2019/08/26 06:23:17.011`) + ` [CRITICAL] [test log message tiproxy 5, foo]`, + }) + writeTmpFile(t, testServers["tiproxy"].tmpDir, "tiproxy-1.log", []string{ + logtime(`2019/08/26 06:24:15.011`) + ` [info] [test log message tiproxy 10, bar]`, + logtime(`2019/08/26 06:25:16.011`) + ` [debug] [test log message tiproxy 11, bar]`, + logtime(`2019/08/26 06:26:17.011`) + ` [ERROR] [test log message tiproxy 12, bar]`, + logtime(`2019/08/26 06:27:18.011`) + ` [TRACE] [test log message tiproxy 13, bar]`, + logtime(`2019/08/26 06:28:19.011`) + ` [critical] [test log message tiproxy 14, bar]`, + }) + // PD writeTmpFile(t, testServers["pd"].tmpDir, "pd.log", []string{ logtime(`2019/08/26 
06:18:13.011`) + ` [INFO] [test log message pd 1, foo]`, @@ -522,33 +554,43 @@ func TestTiDBClusterLog(t *testing.T) { {"2019/08/26 06:18:13.011", "pd", "INFO", "[test log message pd 1, foo]"}, {"2019/08/26 06:19:13.011", "tidb", "INFO", "[test log message tidb 1, foo]"}, {"2019/08/26 06:19:13.011", "tikv", "INFO", "[test log message tikv 1, foo]"}, + {"2019/08/26 06:19:13.011", "tiproxy", "INFO", "[test log message tiproxy 1, foo]"}, {"2019/08/26 06:19:14.011", "pd", "DEBUG", "[test log message pd 2, foo]"}, {"2019/08/26 06:19:14.011", "tidb", "DEBUG", "[test log message tidb 2, foo]"}, {"2019/08/26 06:19:15.011", "tidb", "error", "[test log message tidb 3, foo]"}, {"2019/08/26 06:19:16.011", "tidb", "trace", "[test log message tidb 4, foo]"}, {"2019/08/26 06:19:17.011", "tidb", "CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:20:14.011", "tikv", "DEBUG", "[test log message tikv 2, foo]"}, + {"2019/08/26 06:20:14.011", "tiproxy", "DEBUG", "[test log message tiproxy 2, foo]"}, {"2019/08/26 06:20:15.011", "pd", "error", "[test log message pd 3, foo]"}, {"2019/08/26 06:21:15.011", "tikv", "error", "[test log message tikv 3, foo]"}, + {"2019/08/26 06:21:15.011", "tiproxy", "error", "[test log message tiproxy 3, foo]"}, {"2019/08/26 06:21:16.011", "pd", "trace", "[test log message pd 4, foo]"}, {"2019/08/26 06:22:16.011", "tikv", "trace", "[test log message tikv 4, foo]"}, + {"2019/08/26 06:22:16.011", "tiproxy", "trace", "[test log message tiproxy 4, foo]"}, {"2019/08/26 06:22:17.011", "pd", "CRITICAL", "[test log message pd 5, foo]"}, {"2019/08/26 06:23:13.011", "pd", "info", "[test log message pd 10, bar]"}, {"2019/08/26 06:23:17.011", "tikv", "CRITICAL", "[test log message tikv 5, foo]"}, + {"2019/08/26 06:23:17.011", "tiproxy", "CRITICAL", "[test log message tiproxy 5, foo]"}, {"2019/08/26 06:24:14.011", "pd", "debug", "[test log message pd 11, bar]"}, {"2019/08/26 06:24:15.011", "tikv", "info", "[test log message tikv 10, bar]"}, + {"2019/08/26 06:24:15.011", "tiproxy", "info", "[test log message tiproxy 10, bar]"}, {"2019/08/26 06:25:13.011", "tidb", "info", "[test log message tidb 10, bar]"}, {"2019/08/26 06:25:14.011", "tidb", "debug", "[test log message tidb 11, bar]"}, {"2019/08/26 06:25:15.011", "pd", "ERROR", "[test log message pd 12, bar]"}, {"2019/08/26 06:25:15.011", "tidb", "ERROR", "[test log message tidb 12, bar]"}, {"2019/08/26 06:25:16.011", "tidb", "TRACE", "[test log message tidb 13, bar]"}, {"2019/08/26 06:25:16.011", "tikv", "debug", "[test log message tikv 11, bar]"}, + {"2019/08/26 06:25:16.011", "tiproxy", "debug", "[test log message tiproxy 11, bar]"}, {"2019/08/26 06:25:17.011", "tidb", "critical", "[test log message tidb 14, bar]"}, {"2019/08/26 06:26:16.011", "pd", "TRACE", "[test log message pd 13, bar]"}, {"2019/08/26 06:26:17.011", "tikv", "ERROR", "[test log message tikv 12, bar]"}, + {"2019/08/26 06:26:17.011", "tiproxy", "ERROR", "[test log message tiproxy 12, bar]"}, {"2019/08/26 06:27:17.011", "pd", "critical", "[test log message pd 14, bar]"}, {"2019/08/26 06:27:18.011", "tikv", "TRACE", "[test log message tikv 13, bar]"}, + {"2019/08/26 06:27:18.011", "tiproxy", "TRACE", "[test log message tiproxy 13, bar]"}, {"2019/08/26 06:28:19.011", "tikv", "critical", "[test log message tikv 14, bar]"}, + {"2019/08/26 06:28:19.011", "tiproxy", "critical", "[test log message tiproxy 14, bar]"}, } var cases = []struct { @@ -572,14 +614,17 @@ func TestTiDBClusterLog(t *testing.T) { expected: [][]string{ {"2019/08/26 06:19:13.011", "tidb", "INFO", "[test 
log message tidb 1, foo]"}, {"2019/08/26 06:19:13.011", "tikv", "INFO", "[test log message tikv 1, foo]"}, + {"2019/08/26 06:19:13.011", "tiproxy", "INFO", "[test log message tiproxy 1, foo]"}, {"2019/08/26 06:19:14.011", "pd", "DEBUG", "[test log message pd 2, foo]"}, {"2019/08/26 06:19:14.011", "tidb", "DEBUG", "[test log message tidb 2, foo]"}, {"2019/08/26 06:19:15.011", "tidb", "error", "[test log message tidb 3, foo]"}, {"2019/08/26 06:19:16.011", "tidb", "trace", "[test log message tidb 4, foo]"}, {"2019/08/26 06:19:17.011", "tidb", "CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:20:14.011", "tikv", "DEBUG", "[test log message tikv 2, foo]"}, + {"2019/08/26 06:20:14.011", "tiproxy", "DEBUG", "[test log message tiproxy 2, foo]"}, {"2019/08/26 06:20:15.011", "pd", "error", "[test log message pd 3, foo]"}, {"2019/08/26 06:21:15.011", "tikv", "error", "[test log message tikv 3, foo]"}, + {"2019/08/26 06:21:15.011", "tiproxy", "error", "[test log message tiproxy 3, foo]"}, }, }, { @@ -715,9 +760,11 @@ func TestTiDBClusterLog(t *testing.T) { {"2019/08/26 06:19:17.011", "tidb", "CRITICAL", "[test log message tidb 5, foo]"}, {"2019/08/26 06:22:17.011", "pd", "CRITICAL", "[test log message pd 5, foo]"}, {"2019/08/26 06:23:17.011", "tikv", "CRITICAL", "[test log message tikv 5, foo]"}, + {"2019/08/26 06:23:17.011", "tiproxy", "CRITICAL", "[test log message tiproxy 5, foo]"}, {"2019/08/26 06:25:17.011", "tidb", "critical", "[test log message tidb 14, bar]"}, {"2019/08/26 06:27:17.011", "pd", "critical", "[test log message pd 14, bar]"}, {"2019/08/26 06:28:19.011", "tikv", "critical", "[test log message tikv 14, bar]"}, + {"2019/08/26 06:28:19.011", "tiproxy", "critical", "[test log message tiproxy 14, bar]"}, }, }, { diff --git a/pkg/executor/merge_join_test.go b/pkg/executor/merge_join_test.go index 02b7725715ead..c0168c2cd2c24 100644 --- a/pkg/executor/merge_join_test.go +++ b/pkg/executor/merge_join_test.go @@ -30,198 +30,6 @@ import ( "github.com/stretchr/testify/require" ) -const plan1 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [MergeJoin_17 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} MergeJoin_8] [TableScan_22 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t2.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "MergeJoin_17", - "rightPlan": "TableScan_22", - "desc": "false" -} Sort_23] [Sort_23 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_8" -} ]]` - -const plan2 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter 
conditions": null - } -} MergeJoin_17] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_17] [MergeJoin_17 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} MergeJoin_8] [TableScan_22 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t2.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "MergeJoin_17", - "rightPlan": "TableScan_22", - "desc": "false" -} Sort_23] [Sort_23 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_8" -} ]]` - -const plan3 = `[[TableScan_12 { - "db": "test", - "table": "t1", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_9] [TableScan_15 { - "db": "test", - "table": "t2", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_9] [MergeJoin_9 { - "eqCond": [ - "eq(test.t1.c1, test.t2.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "TableScan_12", - "rightPlan": "TableScan_15", - "desc": "false" -} Sort_16] [Sort_16 { - "exprs": [ - { - "Expr": "test.t1.c1", - "Desc": false - } - ], - "limit": null, - "child": "MergeJoin_9" -} MergeJoin_8] [TableScan_23 { - "db": "test", - "table": "t3", - "desc": false, - "keep order": true, - "push down info": { - "limit": 0, - "access conditions": null, - "index filter conditions": null, - "table filter conditions": null - } -} MergeJoin_8] [MergeJoin_8 { - "eqCond": [ - "eq(test.t1.c1, test.t3.c1)" - ], - "leftCond": null, - "rightCond": null, - "otherCond": [], - "leftPlan": "Sort_16", - "rightPlan": "TableScan_23", - "desc": "false" -} ]]` - func checkMergeAndRun(tk *testkit.TestKit, t *testing.T, sql string) *testkit.Result { explainedSQL := "explain format = 'brief' " + sql result := tk.MustQuery(explainedSQL) @@ -230,15 +38,6 @@ func checkMergeAndRun(tk *testkit.TestKit, t *testing.T, sql string) *testkit.Re return tk.MustQuery(sql) } -func checkPlanAndRun(tk *testkit.TestKit, t *testing.T, plan string, sql string) *testkit.Result { - explainedSQL := "explain format = 'brief' " + sql - /* result := */ tk.MustQuery(explainedSQL) - // TODO: Reopen it after refactoring explain. 
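checkMergeAndRun above keeps the executable half of this pattern: explain the statement, check that the physical plan really contains a MergeJoin operator, then run the query. A minimal sketch of such a helper, assuming this file's existing imports plus fmt and strings (requireMergeJoin is a hypothetical name):

package executor_test

import (
	"fmt"
	"strings"
	"testing"

	"github.com/pingcap/tidb/pkg/testkit"
	"github.com/stretchr/testify/require"
)

// requireMergeJoin explains the statement, requires a MergeJoin operator
// somewhere in the plan tree, and then executes the statement itself.
func requireMergeJoin(t *testing.T, tk *testkit.TestKit, sql string) *testkit.Result {
	found := false
	for _, row := range tk.MustQuery("explain format = 'brief' " + sql).Rows() {
		if strings.Contains(fmt.Sprintf("%v", row[0]), "MergeJoin") {
			found = true
			break
		}
	}
	require.True(t, found, "plan does not contain a MergeJoin operator")
	return tk.MustQuery(sql)
}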
- // resultStr := fmt.Sprintf("%v", result.Rows()) - // require.Equal(t, resultStr, plan) - return tk.MustQuery(sql) -} - func TestShuffleMergeJoinInDisk(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/testMergeJoinRowContainerSpill", "return(true)")) defer func() { @@ -318,418 +117,6 @@ func TestMergeJoinInDisk(t *testing.T) { require.Greater(t, tk.Session().GetSessionVars().StmtCtx.DiskTracker.MaxConsumed(), int64(0)) } -func TestMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, c2 int)") - tk.MustExec("create table t1(c1 int, c2 int)") - tk.MustExec("insert into t values(1,1),(2,2)") - tk.MustExec("insert into t1 values(2,3),(4,4)") - - result := checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows("1 1 ")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows(" 1 1")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") - result.Check(testkit.Rows("1 1 ", "2 2 2 3")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - - tk.MustExec("create table t1 (c1 int, c2 int)") - tk.MustExec("create table t2 (c1 int, c2 int)") - tk.MustExec("create table t3 (c1 int, c2 int)") - - tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") - tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") - tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") - - result = tk.MustQuery("select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") - result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (c1 int)") - tk.MustExec("insert into t1 values (1), (1), (1)") - result = tk.MustQuery("select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1;") - result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, index k(c1))") - tk.MustExec("create table t1(c1 int)") - tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") - tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") - result = tk.MustQuery("select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - // Test LogicalSelection under LogicalJoin. 
- result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - tk.MustExec("begin;") - // Test LogicalLock under LogicalJoin. - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - // Test LogicalUnionScan under LogicalJoin. - tk.MustExec("insert into t1 values(8);") - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - tk.MustExec("rollback;") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int)") - tk.MustExec("create table t1(c1 int unsigned)") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t1 values (1)") - result = tk.MustQuery("select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1") - result.Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, index a(a), index b(b))") - tk.MustExec("insert into t values(1, 2)") - tk.MustQuery("select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a").Check(testkit.Rows(" 2")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int, primary key(a, b))") - tk.MustExec("insert into t value(1,1),(1,2),(1,3),(1,4)") - tk.MustExec("create table s(a int, primary key(a))") - tk.MustExec("insert into s value(1)") - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a").Check(testkit.Rows("4")) - - // Test TIDB_SMJ for cartesian product. 
- tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t value(1),(2)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "Sort 100000000.00 root test.t.a, test.t.a", - "└─MergeJoin 100000000.00 root inner join", - " ├─TableReader(Build) 10000.00 root data:TableFullScan", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "1 1", - "1 2", - "2 1", - "2 2", - )) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into t values(1,1),(1,2)") - tk.MustExec("create table s(a int, b int)") - tk.MustExec("insert into s values(1,1)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "MergeJoin 10000.00 root left outer semi join, other cond:eq(test.t.a, test.s.a), ge(test.s.b, test.t.b)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "1", - "0", - )) - - // Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t (a int, key(a))") - tk.MustExec("create table t1 (a int, key(a))") - tk.MustExec("insert into t values (1), (2), (3)") - tk.MustExec("insert into t1 values (1), (2), (3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc").Check(testkit.Rows( - "3", "2", "1")) - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, key(a), key(b))") - tk.MustExec("insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc").Check(testkit.Rows( - "3", "3", "3", "3", "3", "3", - "2", "2", "2", "2", "2", "2", - "1", "1", "1", "1", "1", "1", "1", "1", "1")) - - tk.MustExec("drop table if exists s") - tk.MustExec("create table s (a int)") - tk.MustExec("insert into s values (4), (1), (3), (2)") - tk.MustQuery("explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "Sort 12487.50 root test.s.a:desc", - "└─HashJoin 12487.50 root inner join, equal:[eq(test.s.a, test.s.a)]", - " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - )) - 
tk.MustQuery("select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "4", "3", "2", "1")) -} - -func TestShuffleMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, c2 int)") - tk.MustExec("create table t1(c1 int, c2 int)") - tk.MustExec("insert into t values(1,1),(2,2)") - tk.MustExec("insert into t1 values(2,3),(4,4)") - - result := checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows("1 1 ")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows(" 1 1")) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false") - result.Check(testkit.Rows()) - result = checkMergeAndRun(tk, t, "select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1") - result.Check(testkit.Rows("1 1 ", "2 2 2 3")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - - tk.MustExec("create table t1 (c1 int, c2 int)") - tk.MustExec("create table t2 (c1 int, c2 int)") - tk.MustExec("create table t3 (c1 int, c2 int)") - - tk.MustExec("insert into t1 values (1,1), (2,2), (3,3)") - tk.MustExec("insert into t2 values (1,1), (3,3), (5,5)") - tk.MustExec("insert into t3 values (1,1), (5,5), (9,9)") - - result = tk.MustQuery("select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2;") - result.Check(testkit.Rows(" 5 5", " 9 9", "1 1 1 1 1 1")) - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1 (c1 int)") - tk.MustExec("insert into t1 values (1), (1), (1)") - result = tk.MustQuery("select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1;") - result.Check(testkit.Rows("1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1", "1 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int, index k(c1))") - tk.MustExec("create table t1(c1 int)") - tk.MustExec("insert into t values (1),(2),(3),(4),(5),(6),(7)") - tk.MustExec("insert into t1 values (1),(2),(3),(4),(5),(6),(7)") - result = tk.MustQuery("select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - // Test LogicalSelection under LogicalJoin. 
- result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1;") - result.Check(testkit.Rows("1", "2", "3")) - tk.MustExec("begin;") - // Test LogicalLock under LogicalJoin. - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - // Test LogicalUnionScan under LogicalJoin. - tk.MustExec("insert into t1 values(8);") - result = tk.MustQuery("select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1;") - result.Check(testkit.Rows("1", "2", "3", "4", "5", "6", "7")) - tk.MustExec("rollback;") - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t(c1 int)") - tk.MustExec("create table t1(c1 int unsigned)") - tk.MustExec("insert into t values (1)") - tk.MustExec("insert into t1 values (1)") - result = tk.MustQuery("select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1") - result.Check(testkit.Rows("1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, index a(a), index b(b))") - tk.MustExec("insert into t values(1, 2)") - tk.MustQuery("select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a").Check(testkit.Rows(" 2")) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int, primary key(a, b))") - tk.MustExec("insert into t value(1,1),(1,2),(1,3),(1,4)") - tk.MustExec("create table s(a int, primary key(a))") - tk.MustExec("insert into s value(1)") - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a").Check(testkit.Rows("4")) - - // Test TIDB_SMJ for cartesian product. 
- tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t value(1),(2)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "Sort 100000000.00 root test.t.a, test.t.a", - "└─MergeJoin 100000000.00 root inner join", - " ├─TableReader(Build) 10000.00 root data:TableFullScan", - " │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo", - " └─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a").Check(testkit.Rows( - "1 1", - "1 2", - "2 1", - "2 2", - )) - - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists s") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into t values(1,1),(1,2)") - tk.MustExec("create table s(a int, b int)") - tk.MustExec("insert into s values(1,1)") - tk.MustQuery("explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "MergeJoin 10000.00 root left outer semi join, other cond:eq(test.t.a, test.s.a), ge(test.s.b, test.t.b)", - "├─TableReader(Build) 10000.00 root data:TableFullScan", - "│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - "└─TableReader(Probe) 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo", - )) - tk.MustQuery("select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t").Check(testkit.Rows( - "1", - "0", - )) - - // Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 - tk.MustExec("drop table if exists t") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t (a int, key(a))") - tk.MustExec("create table t1 (a int, key(a))") - tk.MustExec("insert into t values (1), (2), (3)") - tk.MustExec("insert into t1 values (1), (2), (3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc").Check(testkit.Rows( - "3", "2", "1")) - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, key(a), key(b))") - tk.MustExec("insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3)") - tk.MustQuery("select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc").Check(testkit.Rows( - "3", "3", "3", "3", "3", "3", - "2", "2", "2", "2", "2", "2", - "1", "1", "1", "1", "1", "1", "1", "1", "1")) - - tk.MustExec("drop table if exists s") - tk.MustExec("create table s (a int)") - tk.MustExec("insert into s values (4), (1), (3), (2)") - tk.MustQuery("explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "Sort 12487.50 root test.s.a:desc", - "└─HashJoin 12487.50 root inner join, equal:[eq(test.s.a, test.s.a)]", - " ├─TableReader(Build) 9990.00 root data:Selection", - " │ └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - " └─TableReader(Probe) 9990.00 root data:Selection", - " └─Selection 9990.00 cop[tikv] not(isnull(test.s.a))", - " └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo", - )) - 
tk.MustQuery("select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc").Check(testkit.Rows( - "4", "3", "2", "1")) -} - -func Test3WaysMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - tk.MustExec("create table t1(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t2(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t3(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("insert into t1 values(1,1),(2,2),(3,3)") - tk.MustExec("insert into t2 values(2,3),(3,4),(4,5)") - tk.MustExec("insert into t3 values(1,2),(2,4),(3,10)") - result := checkPlanAndRun(tk, t, plan1, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - result = checkPlanAndRun(tk, t, plan2, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - // In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended - // On the other hand, t1 order kept so no final sort appended - result = checkPlanAndRun(tk, t, plan3, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) -} - -func Test3WaysShuffleMergeJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("drop table if exists t3") - tk.MustExec("create table t1(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t2(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("create table t3(c1 int, c2 int, PRIMARY KEY (c1))") - tk.MustExec("insert into t1 values(1,1),(2,2),(3,3)") - tk.MustExec("insert into t2 values(2,3),(3,4),(4,5)") - tk.MustExec("insert into t3 values(1,2),(2,4),(3,10)") - result := checkPlanAndRun(tk, t, plan1, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - result = checkPlanAndRun(tk, t, plan2, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) - - // In below case, t1 side filled with null when no matched join, so that order is not kept and sort appended - // On the other hand, t1 order kept so no final sort appended - result = checkPlanAndRun(tk, t, plan3, "select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1") - result.Check(testkit.Rows("2 2 2 3 2 4", "3 3 3 4 3 10")) -} - -func TestMergeJoinDifferentTypes(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@session.tidb_executor_concurrency = 4;") - tk.MustExec("set @@session.tidb_hash_join_concurrency = 5;") - tk.MustExec("set @@session.tidb_distsql_scan_concurrency = 15;") - - 
tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a bigint, b bit(1), index idx_a(a));`) - tk.MustExec(`create table t2(a bit(1) not null, b bit(1), index idx_a(a));`) - tk.MustExec(`insert into t1 values(1, 1);`) - tk.MustExec(`insert into t2 values(1, 1);`) - tk.MustQuery(`select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a;`).Check(testkit.Rows(`1 1`)) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a float, b double, index idx_a(a));`) - tk.MustExec(`create table t2(a double not null, b double, index idx_a(a));`) - tk.MustExec(`insert into t1 values(1, 1);`) - tk.MustExec(`insert into t2 values(1, 1);`) - tk.MustQuery(`select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a;`).Check(testkit.Rows(`1 1`)) - - tk.MustExec(`drop table if exists t1;`) - tk.MustExec(`drop table if exists t2;`) - tk.MustExec(`create table t1(a bigint signed, b bigint, index idx_a(a));`) - tk.MustExec(`create table t2(a bigint unsigned, b bigint, index idx_a(a));`) - tk.MustExec(`insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);`) - tk.MustExec(`insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);`) - tk.MustQuery(`select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows( - `0 0`, - `0 0`, - `0 0`, - `0 0`, - )) -} - // TestVectorizedMergeJoin is used to test vectorized merge join with some corner cases. // //nolint:gosimple // generates false positive fmt.Sprintf warnings which keep aligned @@ -965,40 +352,3 @@ func TestVectorizedShuffleMergeJoin(t *testing.T) { runTest(ca.t2, ca.t1) } } - -func TestMergeJoinWithOtherConditions(t *testing.T) { - // more than one inner tuple should be filtered on other conditions - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists R;`) - tk.MustExec(`drop table if exists Y;`) - tk.MustExec(`create table Y (a int primary key, b int, index id_b(b));`) - tk.MustExec(`insert into Y values (0,2),(2,2);`) - tk.MustExec(`create table R (a int primary key, b int);`) - tk.MustExec(`insert into R values (2,2);`) - // the max() limits the required rows at most one - // TODO(fangzhuhe): specify Y as the build side using hints - tk.MustQuery(`select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;`).Check(testkit.Rows( - `2`, - )) -} - -func TestShuffleMergeJoinWithOtherConditions(t *testing.T) { - // more than one inner tuple should be filtered on other conditions - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec("set @@session.tidb_merge_join_concurrency = 4;") - tk.MustExec(`drop table if exists R;`) - tk.MustExec(`drop table if exists Y;`) - tk.MustExec(`create table Y (a int primary key, b int, index id_b(b));`) - tk.MustExec(`insert into Y values (0,2),(2,2);`) - tk.MustExec(`create table R (a int primary key, b int);`) - tk.MustExec(`insert into R values (2,2);`) - // the max() limits the required rows at most one - // TODO(fangzhuhe): specify Y as the build side using hints - tk.MustQuery(`select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;`).Check(testkit.Rows( - `2`, - )) -} diff --git a/pkg/executor/parallel_apply.go b/pkg/executor/parallel_apply.go index cd9b1b28321ae..ef210a798c532 100644 
--- a/pkg/executor/parallel_apply.go +++ b/pkg/executor/parallel_apply.go @@ -20,12 +20,12 @@ import ( "sync" "sync/atomic" - "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/executor/internal/applycache" "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/parser/terror" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/execdetails" @@ -267,7 +267,7 @@ func (e *ParallelNestedLoopApplyExec) putResult(chk *chunk.Chunk, err error) (ex func (e *ParallelNestedLoopApplyExec) handleWorkerPanic(ctx context.Context, wg *sync.WaitGroup) { if r := recover(); r != nil { - err := errors.Errorf("%v", r) + err := util.GetRecoverError(r) logutil.Logger(ctx).Error("parallel nested loop join worker panicked", zap.Error(err), zap.Stack("stack")) e.resultChkCh <- result{nil, err} } diff --git a/pkg/executor/parallel_apply_test.go b/pkg/executor/parallel_apply_test.go index b05fe3c7bcdcf..d57e0d23c358b 100644 --- a/pkg/executor/parallel_apply_test.go +++ b/pkg/executor/parallel_apply_test.go @@ -20,9 +20,7 @@ import ( "testing" "github.com/pingcap/failpoint" - "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/collate" "github.com/stretchr/testify/require" ) @@ -253,25 +251,6 @@ func TestApplyMultiColumnType(t *testing.T) { tk.MustQuery(sql).Sort().Check(testkit.Rows("2", "2", "3", "3", "", "")) } -func TestSetTiDBEnableParallelApply(t *testing.T) { - // validate the tidb_enable_parallel_apply's value - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=0") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("0")) - tk.MustExec("set tidb_enable_parallel_apply=1") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("1")) - tk.MustExec("set tidb_enable_parallel_apply=on") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("1")) - tk.MustExec("set tidb_enable_parallel_apply=off") - tk.MustQuery("select @@tidb_enable_parallel_apply").Check(testkit.Rows("0")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=-1")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=2")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply=1000")) - require.Error(t, tk.ExecToErr("set tidb_enable_parallel_apply='onnn'")) -} - func TestMultipleApply(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -404,118 +383,6 @@ func TestApplyWithOtherOperators(t *testing.T) { tk.MustQuery(sql).Sort().Check(testkit.Rows("1")) } -func TestApplyWithOtherFeatures(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - - // collation 1 - tk.MustExec("drop table if exists t, t1") - tk.MustExec("create table t(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int)") - tk.MustExec("create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int)") - tk.MustExec("insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4)") - tk.MustExec("insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4)") - sql := "select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 
where t1.a >= t.a) from t" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 10", "1 10", "1 10", "1 10")) - - // collation 2 - sql = "select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 10", "2 9", "3 7", "4 4")) - collate.SetNewCollationEnabledForTest(false) - defer collate.SetNewCollationEnabledForTest(true) - - // plan cache - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3)") - tk.MustExec("insert into t2 values (0, 1), (2, -1), (3, 2)") - tk.MustExec(`prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)"`) - tk.MustExec("set @a=1") - tk.MustQuery("execute stmt using @a").Sort().Check(testkit.Rows("1 1", "1 5", "2 3", "2 4")) - tk.MustExec("set @a=2") - tk.MustQuery("execute stmt using @a").Sort().Check(testkit.Rows("1 5", "2 3", "2 4")) - tk.MustQuery(" select @@last_plan_from_cache").Check(testkit.Rows("0")) // sub-queries are not cacheable - - // cluster index - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a int, b int, c int, primary key(a, b))") - tk.MustExec("create table t2(a int, b int, c int, primary key(a, c))") - tk.MustExec("insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)") - tk.MustExec("insert into t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)") - sql = "select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 1 1", "2 2 2", "3 3 3")) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeIntOnly - - // partitioning table - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40))") - tk.MustExec("create table t2(a int, b int) partition by hash(a) partitions 4") - tk.MustExec("insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35)") - tk.MustExec("insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35)") - sql = "select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10" - tk.MustQuery(sql).Sort().Check(testkit.Rows("0 15", "0 ", "0 ")) -} - -func TestApplyInDML(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - - // delete - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a bigint, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1", "1 1")) - - // insert - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, c int)") - 
tk.MustExec("insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 1), (2, 2, 2), (3, 3, 3)") - tk.MustExec("insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1 1", "1 1 1", "1 1 1", "1 1 1", "2 2 2", "2 2 2", "3 3 3", "3 3 3")) - - // update - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a smallint, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("update t set a = a + 1 where (select count(*) from t2 where t2.a <= t.a) in (1, 2)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("2 1", "2 1", "2 2", "2 2", "3 3", "3 3")) - - // replace - tk.MustExec("drop table if exists t, t2") - tk.MustExec("create table t(a tinyint, b int, unique index idx(a))") - tk.MustExec("create table t2(a tinyint, b int)") - tk.MustExec("insert into t values (1, 1), (2, 2), (3, 3), (4, 4)") - tk.MustExec("insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3)") - tk.MustExec("replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5)") - tk.MustQuery("select * from t").Sort().Check(testkit.Rows("1 1", "2 2", "3 3", "4 2", "9 3")) - - // Transaction - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int, b int)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t1 values (1, 2), (1, 3)") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (1, 4), (2, 3), (2, 5)") - tk.MustExec("insert into t2 values (2, 3), (3, 4)") - sql := "select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b)" - tk.MustQuery(sql).Sort().Check(testkit.Rows("1 4", "2 5")) - tk.MustExec("delete from t1 where a = 1") - tk.MustQuery(sql).Sort().Check(testkit.Rows("2 5")) - tk.MustExec("commit") - tk.MustQuery(sql).Sort().Check(testkit.Rows("2 5")) -} - func TestApplyConcurrency(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -622,16 +489,3 @@ func TestApplyGoroutinePanic(t *testing.T) { require.NoError(t, failpoint.Disable(panicPath)) } } - -func TestIssue24930(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_enable_parallel_apply=true") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1(a int)") - tk.MustExec("create table t2(a int)") - tk.MustQuery(`select case when t1.a is null - then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a - from t1 where t1.a=1 order by a limit 1`).Check(testkit.Rows()) // can return an empty result instead of hanging forever -} diff --git a/pkg/executor/partition_table_test.go b/pkg/executor/partition_table_test.go index 3c9dda3fe799c..764c689f51364 100644 --- a/pkg/executor/partition_table_test.go +++ b/pkg/executor/partition_table_test.go @@ -27,140 +27,12 @@ import ( "github.com/pingcap/tidb/pkg/domain" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" - "github.com/pingcap/tidb/pkg/testkit/testdata" - "github.com/pingcap/tidb/pkg/util/memory" + 
"github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) -func TestSetPartitionPruneMode(t *testing.T) { - store := testkit.CreateMockStore(t) - - tkInit := testkit.NewTestKit(t, store) - tkInit.MustExec(`set @@session.tidb_partition_prune_mode = DEFAULT`) - tkInit.MustQuery("show warnings").Check(testkit.Rows()) - tkInit.MustExec(`set @@global.tidb_partition_prune_mode = DEFAULT`) - tkInit.MustQuery("show warnings").Check(testkit.Rows("Warning 1105 Please analyze all partition tables again for consistency between partition and global stats")) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - tk.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - tk.MustExec(`set @@session.tidb_partition_prune_mode = "static"`) - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk.MustExec(`set @@global.tidb_partition_prune_mode = "static"`) - tk.MustQuery("show warnings").Check(testkit.Rows()) - tk2 := testkit.NewTestKit(t, store) - tk2.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("static")) - tk2.MustQuery("show warnings").Check(testkit.Rows()) - tk2.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("static")) - tk2.MustExec(`set @@session.tidb_partition_prune_mode = "dynamic"`) - tk2.MustQuery("show warnings").Sort().Check(testkit.Rows( - `Warning 1105 Please analyze all partition tables again for consistency between partition and global stats`, - `Warning 1105 Please avoid setting partition prune mode to dynamic at session level and set partition prune mode to dynamic at global level`)) - tk2.MustExec(`set @@global.tidb_partition_prune_mode = "dynamic"`) - tk2.MustQuery("show warnings").Check(testkit.Rows(`Warning 1105 Please analyze all partition tables again for consistency between partition and global stats`)) - tk3 := testkit.NewTestKit(t, store) - tk3.MustQuery("select @@global.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) - tk3.MustQuery("select @@session.tidb_partition_prune_mode").Check(testkit.Rows("dynamic")) -} - -func TestFourReader(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists pt") - tk.MustExec(`create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( -partition p0 values less than (4), -partition p1 values less than (7), -partition p2 values less than (10))`) - tk.MustExec("insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null)") - - // Table reader - tk.MustQuery("select * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", " ")) - // Table reader: table dual - tk.MustQuery("select * from pt where c > 10").Check(testkit.Rows()) - // Table reader: one partition - tk.MustQuery("select * from pt where c > 8").Check(testkit.Rows("9 9")) - // Table reader: more than one partition - tk.MustQuery("select * from pt where c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9")) - - // Index reader - tk.MustQuery("select c from pt").Sort().Check(testkit.Rows("0", "2", "4", "6", "7", "9", "")) - tk.MustQuery("select c from pt where c > 10").Check(testkit.Rows()) - tk.MustQuery("select c from pt where c > 
8").Check(testkit.Rows("9")) - tk.MustQuery("select c from pt where c < 2 or c >= 9").Sort().Check(testkit.Rows("0", "9")) - - // Index lookup - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6", "7 7", "9 9", " ")) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10").Check(testkit.Rows()) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8").Check(testkit.Rows("9 9")) - tk.MustQuery("select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9").Sort().Check(testkit.Rows("0 0", "9 9")) - - // Index Merge - tk.MustExec("set @@tidb_enable_index_merge = 1") - tk.MustQuery("select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7").Sort().Check(testkit.Rows("0 0", "2 2", "4 4", "6 6")) -} - -func TestPartitionIndexJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set @@session.tidb_enable_table_partition = 1") - tk.MustExec("set @@session.tidb_enable_list_partition = 1") - for i := 0; i < 3; i++ { - tk.MustExec("drop table if exists p, t") - if i == 0 { - // Test for range partition - tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) ( - partition p0 values less than (4), - partition p1 values less than (7), - partition p2 values less than (10))`) - } else if i == 1 { - // Test for list partition - tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) ( - partition p0 values in (1,2,3,4), - partition p1 values in (5,6,7), - partition p2 values in (8, 9,10))`) - } else { - // Test for hash partition - tk.MustExec(`create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5;`) - } - - tk.MustExec("create table t (id int)") - tk.MustExec("insert into p values (3,3), (4,4), (6,6), (9,9)") - tk.MustExec("insert into t values (4), (9)") - - // Build indexLookUp in index join - tk.MustQuery("select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id").Sort().Check(testkit.Rows("4 4 4", "9 9 9")) - // Build index reader in index join - tk.MustQuery("select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id").Sort().Check(testkit.Rows("4", "9")) - } -} - -func TestPartitionUnionScanIndexJoin(t *testing.T) { - // For issue https://github.com/pingcap/tidb/issues/19152 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue)") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (10, 'interesting neumann')") - tk.MustExec("insert into t2 select * from t1") - tk.MustExec("begin") - tk.MustExec("insert into t2 values (11, 'hopeful hoover');") - tk.MustQuery("select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11)").Check(testkit.Rows("10 interesting neumann 10 interesting neumann")) - 
tk.MustExec("commit") -} - func TestPointGetwithRangeAndListPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -248,83 +120,6 @@ func TestPointGetwithRangeAndListPartitionTable(t *testing.T) { tk.MustQuery(queryOnePartition).Check(testkit.Rows(fmt.Sprintf("%v", -1))) } -func TestPartitionReaderUnderApply(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // For issue 19458. - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(c_int int)") - tk.MustExec("insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9)") - tk.MustExec("DROP TABLE IF EXISTS `t1`") - tk.MustExec(`CREATE TABLE t1 ( - c_int int NOT NULL, - c_str varchar(40) NOT NULL, - c_datetime datetime NOT NULL, - c_timestamp timestamp NULL DEFAULT NULL, - c_double double DEFAULT NULL, - c_decimal decimal(12,6) DEFAULT NULL, - PRIMARY KEY (c_int,c_str,c_datetime) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci - PARTITION BY RANGE (c_int) - (PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB, - PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB, - PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB, - PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB, - PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB, - PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB, - PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB, - PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB)`) - tk.MustExec("INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000)") - tk.MustExec("begin") - tk.MustExec("insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088)") - tk.MustQuery("select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int").Check(testkit.Rows( - "1", "2", "3", "4", "5", "6", "7", "8", "9")) - tk.MustExec("rollback") - - // For issue 19450. 
- tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int))") - tk.MustExec("create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4") - tk.MustExec("insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524)") - tk.MustExec("insert into t2 select * from t1") - tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows( - "1 romantic robinson 4.436000", - "2 stoic chaplygin 9.826000", - "3 vibrant shamir 6.300000", - "4 hungry wilson 4.900000", - "5 naughty swartz 9.524000")) - - // For issue 19450 release-4.0 - tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Static) + `'`) - tk.MustQuery("select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str)").Check(testkit.Rows( - "1 romantic robinson 4.436000", - "2 stoic chaplygin 9.826000", - "3 vibrant shamir 6.300000", - "4 hungry wilson 4.900000", - "5 naughty swartz 9.524000")) -} - -func TestImproveCoverage(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`create table coverage_rr ( -pk1 varchar(35) NOT NULL, -pk2 int NOT NULL, -c int, -PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;`) - tk.MustExec("create table coverage_dt (pk1 varchar(35), pk2 int)") - tk.MustExec("insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1)") - tk.MustExec("insert into coverage_dt values ('apple',3),('ios',3),('linux',5)") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustQuery("select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);").Sort().Check(testkit.Rows("ios 3 ios 3 2", "linux 5 linux 5 1")) -} - func TestPartitionInfoDisable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -824,123 +619,11 @@ func TestOrderByAndLimit(t *testing.T) { tk.MustExec("set global tidb_mem_oom_action=CANCEL") err := tk.QueryToErr("select /*+ LIMIT_TO_COP() */ a from trange use index(idx_a) where a > 1 order by a limit 2000") require.Error(t, err) - require.Regexp(t, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery, err) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) tk.MustExec(fmt.Sprintf("set session tidb_mem_quota_query=%s", originMemQuota)) tk.MustExec(fmt.Sprintf("set global tidb_mem_oom_action=%s", originOOMAction)) } -func TestOrderByOnUnsignedPk(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6") - tk.MustExec("insert into tunsigned_hash values(25), (9279808998424041135)") - tk.MustQuery("select min(a) from tunsigned_hash").Check(testkit.Rows("25")) - tk.MustQuery("select max(a) from tunsigned_hash").Check(testkit.Rows("9279808998424041135")) -} - -func 
TestPartitionHandleWithKeepOrder(t *testing.T) { - // https://github.com/pingcap/tidb/issues/44312 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t (id int not null, store_id int not null )" + - "partition by range (store_id)" + - "(partition p0 values less than (6)," + - "partition p1 values less than (11)," + - "partition p2 values less than (16)," + - "partition p3 values less than (21))") - tk.MustExec("create table t1(id int not null, store_id int not null)") - tk.MustExec("insert into t values (1, 1)") - tk.MustExec("insert into t values (2, 17)") - tk.MustExec("insert into t1 values (0, 18)") - tk.MustExec("alter table t exchange partition p3 with table t1") - tk.MustExec("alter table t add index idx(id)") - tk.MustExec("analyze table t") - tk.MustQuery("select *,_tidb_rowid from t use index(idx) order by id limit 2").Check(testkit.Rows("0 18 1", "1 1 1")) - - tk.MustExec("drop table t, t1") - tk.MustExec("create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))" + - "partition by range (b)" + - "(partition p0 values less than (6)," + - "partition p1 values less than (11)," + - "partition p2 values less than (16)," + - "partition p3 values less than (21))") - tk.MustExec("create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))") - tk.MustExec("insert into t values (1,2,3), (2,3,4), (3,4,5)") - tk.MustExec("insert into t1 values (1,18,3)") - tk.MustExec("alter table t exchange partition p3 with table t1") - tk.MustExec("analyze table t") - tk.MustQuery("select * from t where a = 1 or b = 5 order by c limit 2").Sort().Check(testkit.Rows("1 18 3", "1 2 3")) -} - -func TestOrderByOnHandle(t *testing.T) { - // https://github.com/pingcap/tidb/issues/44266 - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - for i := 0; i < 2; i++ { - // indexLookUp + _tidb_rowid - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b) order by b, _tidb_rowid limit 10;").Check(testkit.Rows("2 -1 3", "1 1 1", "3 2 2")) - - // indexLookUp + pkIsHandle - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "primary key(`a`)," + - "KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b) order by b, a limit 10;").Check(testkit.Rows("2 -1 3", "1 1 1", "3 2 2")) - - // indexMerge + _tidb_rowid - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)," + - "KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;").Check(testkit.Rows("3 2 2", "1 1 1")) - - // indexMerge + 
pkIsHandle - tk.MustExec("drop table if exists t") - tk.MustExec("CREATE TABLE `t`(" + - "`a` int(11) NOT NULL," + - "`b` int(11) DEFAULT NULL," + - "`c` int(11) DEFAULT NULL," + - "KEY `idx_b` (`b`)," + - "KEY `idx_c` (`c`)," + - "PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2;") - tk.MustExec("insert into t values (2,-1,3), (3,2,2), (1,1,1);") - if i == 1 { - tk.MustExec("analyze table t") - } - tk.MustQuery("select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;").Check(testkit.Rows("1 1 1", "3 2 2")) - } -} - func TestBatchGetandPointGetwithHashPartition(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1243,70 +926,6 @@ func TestDynamicPruningUnderIndexJoin(t *testing.T) { tk.MustQuery(`select /*+ INL_JOIN(touter, tnormal) */ tnormal.* from touter join tnormal use index(idx_b) on touter.b = tnormal.b`).Sort().Rows()) } -func TestIssue25527(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25527") - tk.MustExec("use test_issue_25527") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - - // the original case - tk.MustExec(`CREATE TABLE t ( - col1 tinyint(4) primary key - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) - PARTITIONS 6`) - tk.MustExec(`insert into t values(-128), (107)`) - tk.MustExec(`prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=-128, @b=107, @c=-128`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Sort().Check(testkit.Rows("-128", "107")) - - // the minimal reproducible case for hash partitioning - tk.MustExec(`CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2`) - tk.MustExec(`insert into t0 values (1)`) - tk.MustQuery(`select a from t0 where a in (1)`).Check(testkit.Rows("1")) - - // the minimal reproducible case for range partitioning - tk.MustExec(`create table t1 (a int primary key) partition by range (a+5) ( - partition p0 values less than(10), partition p1 values less than(20))`) - tk.MustExec(`insert into t1 values (5)`) - tk.MustQuery(`select a from t1 where a in (5)`).Check(testkit.Rows("5")) - - // the minimal reproducible case for list partitioning - tk.MustExec(`create table t2 (a int primary key) partition by list (a+5) ( - partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12))`) - tk.MustExec(`insert into t2 values (5)`) - tk.MustQuery(`select a from t2 where a in (5)`).Check(testkit.Rows("5")) -} - -func TestIssue25598(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25598") - tk.MustExec("use test_issue_25598") - tk.MustExec(`CREATE TABLE UK_HP16726 ( - COL1 bigint(16) DEFAULT NULL, - COL2 varchar(20) DEFAULT NULL, - COL4 datetime DEFAULT NULL, - COL3 bigint(20) DEFAULT NULL, - COL5 float DEFAULT NULL, - UNIQUE KEY UK_COL1 (COL1) /*!80000 INVISIBLE */ - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY HASH( COL1 ) - PARTITIONS 25`) - - tk.MustQuery(`select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`).Check(testkit.Rows()) - tk.MustExec(`explain select t1. col1, t2. 
col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`) - - tk.MustExec(`set @@tidb_partition_prune_mode = 'dynamic'`) - tk.MustQuery(`select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`).Check(testkit.Rows()) - tk.MustExec(`explain select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807`) -} - func TestBatchGetforRangeandListPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1720,596 +1339,6 @@ func TestPartitionTableWithDifferentJoin(t *testing.T) { tk.MustQuery(queryHash).Sort().Check(tk.MustQuery(queryRegular).Sort().Rows()) } -func createTable4DynamicPruneModeTestWithExpression(tk *testkit.TestKit) { - tk.MustExec("create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));") - tk.MustExec("create table thash(a int, b int) partition by hash(a) partitions 4;") - tk.MustExec("create table t(a int, b int)") - tk.MustExec("insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);") - tk.MustExec("set session tidb_partition_prune_mode='dynamic'") - tk.MustExec("analyze table trange") - tk.MustExec("analyze table thash") - tk.MustExec("analyze table t") -} - -type testData4Expression struct { - sql string - partitions []string -} - -func TestDateColWithUnequalExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_datetime_unequal_expression") - tk.MustExec("create database db_datetime_unequal_expression") - tk.MustExec("use db_datetime_unequal_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec(`create table tp(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"))`) - tk.MustExec(`create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"))`) - tk.MustExec(`insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3)`) - tk.MustExec(`insert into t values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3)`) - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a != '2024-01-01 01:01:01'", - partitions: []string{"all"}, - }, - { - sql: "select * from %s where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'", - partitions: 
[]string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestToDaysColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_to_days_expression") - tk.MustExec("create database db_to_days_expression") - tk.MustExec("use db_to_days_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a date, b int) partition by range(to_days(a)) (partition p0 values less than (737822), partition p1 values less than (738019), partition p2 values less than (738154))") - tk.MustExec("create table t(a date, b int)") - tk.MustExec("insert into tp values('2020-01-01', 1), ('2020-03-02', 2), ('2020-05-05', 3), ('2020-11-11', 4)") - tk.MustExec("insert into t values('2020-01-01', 1), ('2020-03-02', 2), ('2020-05-05', 3), ('2020-11-11', 4)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '2020-08-16'", - partitions: []string{"p0,p1"}, - }, - { - sql: "select * from %s where a between '2020-05-01' and '2020-10-01'", - partitions: []string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestWeekdayWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_weekday_expression") - tk.MustExec("create database db_weekday_expression") - tk.MustExec("use db_weekday_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8))") - tk.MustExec("create table t(a datetime, b int)") - tk.MustExec(`insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0)`) - tk.MustExec(`insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0)`) - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a = '2020-08-17 00:00:00'", - partitions: []string{"p0"}, - }, - { - sql: "select * from %s where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00'", - partitions: []string{"p1"}, - }, - { - sql: " select * from %s where a < '2020-08-19 00:00:00'", - partitions: []string{"all"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestFloorUnixTimestampAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_floor_unix_timestamp_int_expression") - tk.MustExec("create database db_floor_unix_timestamp_int_expression") - 
tk.MustExec("use db_floor_unix_timestamp_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400))") - tk.MustExec("create table t(a timestamp, b int)") - tk.MustExec("insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a > '2020-09-11 00:00:00'", - partitions: []string{"p2"}, - }, - { - sql: "select * from %s where a < '2020-07-07 01:00:00'", - partitions: []string{"p0,p1"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestUnixTimestampAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_unix_timestamp_int_expression") - tk.MustExec("create database db_unix_timestamp_int_expression") - tk.MustExec("use db_unix_timestamp_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a timestamp, b int) partition by range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400))") - tk.MustExec("create table t(a timestamp, b int)") - tk.MustExec("insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a > '2020-09-11 00:00:00'", - partitions: []string{"p2"}, - }, - { - sql: "select * from %s where a < '2020-07-07 01:00:00'", - partitions: []string{"p0,p1"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestDatetimeColAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_datetime_int_expression") - tk.MustExec("create database db_datetime_int_expression") - tk.MustExec("use db_datetime_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00'))") - tk.MustExec("create table t(a datetime, b int)") - tk.MustExec("insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4)") - tk.MustExec("insert 
into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '2020-09-01 00:00:00'", - partitions: []string{"p0,p1"}, - }, - { - sql: "select * from %s where a > '2020-07-07 01:00:00'", - partitions: []string{"p1,p2"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestVarcharColAndIntColWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_varchar_int_expression") - tk.MustExec("create database db_varchar_int_expression") - tk.MustExec("use db_varchar_int_expression") - tk.MustExec("set tidb_partition_prune_mode='dynamic'") - tk.MustExec("create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm'))") - tk.MustExec("create table t(a varchar(255), b int)") - tk.MustExec("insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6)") - tk.MustExec("insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6)") - tk.MustExec("analyze table tp") - tk.MustExec("analyze table t") - - tests := []testData4Expression{ - { - sql: "select * from %s where a < '10'", - partitions: []string{"p0"}, - }, - { - sql: "select * from %s where a > 0", - partitions: []string{"all"}, - }, - { - sql: "select * from %s where a < 0", - partitions: []string{"all"}, - }, - } - - for _, t := range tests { - tpSQL := fmt.Sprintf(t.sql, "tp") - tSQL := fmt.Sprintf(t.sql, "t") - tk.MustPartition(tpSQL, t.partitions[0]).Sort().Check(tk.MustQuery(tSQL).Sort().Rows()) - } -} - -func TestDynamicPruneModeWithExpression(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists db_equal_expression") - tk.MustExec("create database db_equal_expression") - tk.MustExec("use db_equal_expression") - createTable4DynamicPruneModeTestWithExpression(tk) - - tables := []string{"trange", "thash"} - tests := []testData4Expression{ - { - sql: "select * from %s where a = 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a = 4 or a = 1", - partitions: []string{ - "p0,p1", - "p0,p1", - }, - }, - { - sql: "select * from %s where a = -1", - partitions: []string{ - "p0", - "p1", - }, - }, - { - sql: "select * from %s where a is NULL", - partitions: []string{ - "p0", - "p0", - }, - }, - { - sql: "select * from %s where b is NULL", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a > -1", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a >= 4 and a <= 5", - partitions: []string{ - "p1,p2", - "p0,p1", - }, - }, - { - sql: "select * from %s where a > 10", - partitions: []string{ - "dual", - "all", - }, - }, - { - sql: "select * from %s where a >=2 and a <= 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a between 2 and 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a < 2", 
- partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a <= 3", - partitions: []string{ - "p0,p1", - "all", - }, - }, - { - sql: "select * from %s where a in (2, 3)", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a in (1, 5)", - partitions: []string{ - "p0,p2", - "p1", - }, - }, - { - sql: "select * from %s where a not in (1, 5)", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a = 2 and a = 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a = 2 and a = 3", - partitions: []string{ - // This means that we have no partition-read plan - "", - "", - }, - }, - { - sql: "select * from %s where a < 2 and a > 0", - partitions: []string{ - "p0", - "p1", - }, - }, - { - sql: "select * from %s where a < 2 and a < 3", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a > 1 and a > 2", - partitions: []string{ - "p1,p2", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a = 3", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a = 2 or a in (3)", - partitions: []string{ - "p0,p1", - "p2,p3", - }, - }, - { - sql: "select * from %s where a = 2 or a > 3", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a <= 1", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where a = 2 or a between 2 and 2", - partitions: []string{ - "p0", - "p2", - }, - }, - { - sql: "select * from %s where a != 2", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a > 4", - partitions: []string{ - "p2", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a != 3", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a != 2 and a = 3", - partitions: []string{ - "p1", - "p3", - }, - }, - { - sql: "select * from %s where not (a = 2)", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where not (a > 2)", - partitions: []string{ - "p0", - "all", - }, - }, - { - sql: "select * from %s where not (a < 2)", - partitions: []string{ - "all", - "all", - }, - }, - // cases that partition pruning can not work - { - sql: "select * from %s where a + 1 > 4", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a - 1 > 0", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a * 2 < 0", - partitions: []string{ - "all", - "all", - }, - }, - { - sql: "select * from %s where a << 1 < 0", - partitions: []string{ - "all", - "all", - }, - }, - // comparison between int column and string column - { - sql: "select * from %s where a > '10'", - partitions: []string{ - "dual", - "all", - }, - }, - { - sql: "select * from %s where a > '10ab'", - partitions: []string{ - "dual", - "all", - }, - }, - } - - for _, t := range tests { - for i := range t.partitions { - sql := fmt.Sprintf(t.sql, tables[i]) - tk.MustPartition(sql, t.partitions[i]).Sort().Check(tk.MustQuery(fmt.Sprintf(t.sql, "t")).Sort().Rows()) - } - } -} - -func TestAddDropPartitions(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - 
tk.MustExec("create database test_add_drop_partition") - tk.MustExec("use test_add_drop_partition") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`create table t(a int) partition by range(a) ( - partition p0 values less than (5), - partition p1 values less than (10), - partition p2 values less than (15))`) - tk.MustExec(`insert into t values (2), (7), (12)`) - tk.MustPartition(`select * from t where a < 3`, "p0").Sort().Check(testkit.Rows("2")) - tk.MustPartition(`select * from t where a < 8`, "p0,p1").Sort().Check(testkit.Rows("2", "7")) - tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "2", "7")) - - // remove p0 - tk.MustExec(`alter table t drop partition p0`) - tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) - tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) - tk.MustPartition(`select * from t where a < 20`, "all").Sort().Check(testkit.Rows("12", "7")) - - // add 2 more partitions - tk.MustExec(`alter table t add partition (partition p3 values less than (20))`) - tk.MustExec(`alter table t add partition (partition p4 values less than (40))`) - tk.MustExec(`insert into t values (15), (25)`) - tk.MustPartition(`select * from t where a < 3`, "p1").Sort().Check(testkit.Rows()) - tk.MustPartition(`select * from t where a < 8`, "p1").Sort().Check(testkit.Rows("7")) - tk.MustPartition(`select * from t where a < 20`, "p1,p2,p3").Sort().Check(testkit.Rows("12", "15", "7")) -} - func TestMPPQueryExplainInfo(t *testing.T) { failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") @@ -2341,66 +1370,6 @@ func TestMPPQueryExplainInfo(t *testing.T) { tk.MustPartition(`select * from t where a < 5 union all select * from t where a > 10`, "p2").Sort().Check(testkit.Rows("12", "2")) } -func TestPartitionPruningInTransaction(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_pruning_transaction") - defer tk.MustExec(`drop database test_pruning_transaction`) - tk.MustExec("use test_pruning_transaction") - tk.MustExec(`create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11))`) - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - tk.MustExec(`begin`) - tk.MustPartitionByList(`select * from t`, []string{"p0", "p1", "p2"}) - tk.MustPartitionByList(`select * from t where a > 3`, []string{"p1", "p2"}) // partition pruning can work in transactions - tk.MustPartitionByList(`select * from t where a > 7`, []string{"p2"}) - tk.MustExec(`rollback`) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec(`begin`) - tk.MustPartition(`select * from t`, "all") - tk.MustPartition(`select * from t where a > 3`, "p1,p2") // partition pruning can work in transactions - tk.MustPartition(`select * from t where a > 7`, "p2") - tk.MustExec(`rollback`) - tk.MustExec("set @@tidb_partition_prune_mode = default") -} - -func TestIssue25253(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - 
tk.MustExec("create database issue25253") - defer tk.MustExec("drop database issue25253") - tk.MustExec("use issue25253") - - tk.MustExec(`CREATE TABLE IDT_HP23902 ( - COL1 smallint DEFAULT NULL, - COL2 varchar(20) DEFAULT NULL, - COL4 datetime DEFAULT NULL, - COL3 bigint DEFAULT NULL, - COL5 float DEFAULT NULL, - KEY UK_COL1 (COL1) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY HASH( COL1+30 ) - PARTITIONS 6`) - tk.MustExec(`insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137)`) - tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1748 Found a row not matching the given partition set", - "Warning 1748 Found a row not matching the given partition set")) - tk.MustQuery(`select * from IDT_HP23902`).Check(testkit.Rows()) - - tk.MustExec(`create table t ( - a int - ) partition by range(a) ( - partition p0 values less than (10), - partition p1 values less than (20))`) - tk.MustExec(`insert ignore into t partition(p0)(a) values(12)`) - tk.MustQuery(`show warnings`).Check(testkit.Rows("Warning 1748 Found a row not matching the given partition set")) - tk.MustQuery(`select * from t`).Check(testkit.Rows()) -} - func TestDML(t *testing.T) { store := testkit.CreateMockStore(t) @@ -2831,34 +1800,6 @@ func TestDirectReadingWithUnionScan(t *testing.T) { tk.MustExec(`rollback`) } -func TestIssue25030(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25030") - tk.MustExec("use test_issue_25030") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`CREATE TABLE tbl_936 ( - col_5410 smallint NOT NULL, - col_5411 double, - col_5412 boolean NOT NULL DEFAULT 1, - col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie', - col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj', - col_5415 timestamp NOT NULL DEFAULT '2021-07-06', - col_5416 decimal(6, 6) DEFAULT 0.49, - col_5417 text COLLATE utf8_bin, - col_5418 float DEFAULT 2048.0762299371554, - col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370, - PRIMARY KEY (col_5419) ) - PARTITION BY HASH (col_5419) PARTITIONS 3`) - tk.MustQuery(`SELECT last_value(col_5414) OVER w FROM tbl_936 - WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419) - ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w`). 
- Check(testkit.Rows()) // can work properly without any error or panic -} - func TestUnsignedPartitionColumn(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3125,70 +2066,6 @@ func TestDirectReadingWithAgg(t *testing.T) { } } -func TestDynamicModeByDefault(t *testing.T) { - failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) - defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_dynamic_by_default") - - tk.MustExec(`create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) ( - partition p0 values less than(300), - partition p1 values less than(500), - partition p2 values less than(1100));`) - tk.MustExec(`create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4;`) - - for _, q := range []string{ - "explain select * from trange where a>400", - "explain select * from thash where a>=100", - } { - for _, r := range tk.MustQuery(q).Rows() { - require.NotContains(t, strings.ToLower(r[0].(string)), "partitionunion") - } - } -} - -func TestIssue24636(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_24636") - tk.MustExec("use test_issue_24636") - - tk.MustExec(`CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b)) - PARTITION BY RANGE ( TO_DAYS(b) ) ( - PARTITION p0 VALUES LESS THAN (737821), - PARTITION p1 VALUES LESS THAN (738289) - )`) - tk.MustExec(`INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0)`) - tk.MustQuery(`select c from t use index(primary) where a=0 limit 1`).Check(testkit.Rows("0")) - - tk.MustExec(` - CREATE TABLE test_partition ( - a varchar(100) NOT NULL, - b date NOT NULL, - c varchar(100) NOT NULL, - d datetime DEFAULT NULL, - e datetime DEFAULT NULL, - f bigint(20) DEFAULT NULL, - g bigint(20) DEFAULT NULL, - h bigint(20) DEFAULT NULL, - i bigint(20) DEFAULT NULL, - j bigint(20) DEFAULT NULL, - k bigint(20) DEFAULT NULL, - l bigint(20) DEFAULT NULL, - PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */ - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin - PARTITION BY RANGE ( TO_DAYS(b) ) ( - PARTITION pmin VALUES LESS THAN (737821), - PARTITION p20200601 VALUES LESS THAN (738289))`) - tk.MustExec(`INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', '428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0)`) - tk.MustQuery(`select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200`).Check(testkit.Rows("428ff6a1-bb37-42ac-9883-33d7a29961e6 9 0")) -} - func TestIdexMerge(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3283,45 +2160,6 @@ func TestIdexMerge(t *testing.T) { } } -func TestIssue25309(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create database test_issue_25309") - tk.MustExec("use test_issue_25309") - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - - tk.MustExec(`CREATE TABLE tbl_500 ( - col_20 tinyint(4) NOT NULL, - col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, - col_22 json DEFAULT NULL, - col_23 blob DEFAULT NULL, - col_24 mediumint(9) NOT NULL, - 
col_25 float NOT NULL DEFAULT '7306.384497585912', - col_26 binary(196) NOT NULL, - col_27 timestamp DEFAULT '1976-12-08 00:00:00', - col_28 bigint(20) NOT NULL, - col_29 tinyint(1) NOT NULL DEFAULT '1', - PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */, - KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24), - KEY idx_8 (col_25,col_29,col_24) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin`) - - tk.MustExec(`CREATE TABLE tbl_600 ( - col_60 int(11) NOT NULL DEFAULT '-776833487', - col_61 tinyint(1) NOT NULL DEFAULT '1', - col_62 tinyint(4) NOT NULL DEFAULT '-125', - PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */, - KEY idx_19 (col_60) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci - PARTITION BY HASH( col_60 ) - PARTITIONS 1`) - - tk.MustExec(`insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0`) - tk.MustQuery(`select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' )`).Check(testkit.Rows()) -} - func TestGlobalIndexScan(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3409,35 +2247,6 @@ partition p2 values less than (10))`) failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/checkDropGlobalIndex") } -func TestIssue20028(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("set @@tidb_partition_prune_mode='static-only'") - tk.MustExec(`create table t1 (c_datetime datetime, primary key (c_datetime)) -partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')), -partition p1 values less than (to_days('2020-04-01')), -partition p2 values less than (to_days('2020-06-01')), -partition p3 values less than maxvalue)`) - tk.MustExec("create table t2 (c_datetime datetime, unique key(c_datetime))") - tk.MustExec("insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58')") - tk.MustExec("insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18')") - tk.MustExec("begin") - tk.MustQuery("select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update"). - Sort(). 
- Check(testkit.Rows( - "2020-02-21 07:15:33 2020-01-10 09:36:00", - "2020-02-21 07:15:33 2020-02-04 06:00:00", - "2020-04-27 13:50:58 2020-01-10 09:36:00", - "2020-04-27 13:50:58 2020-02-04 06:00:00", - "2020-06-26 03:24:00 2020-01-10 09:36:00", - "2020-06-26 03:24:00 2020-02-04 06:00:00", - "2020-06-26 03:24:00 2020-06-12 03:45:18")) - tk.MustExec("rollback") -} - func TestSelectLockOnPartitionTable(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3571,215 +2380,6 @@ partition p2 values less than (11))`) } } -func TestIssue21731(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists p, t") - tk.MustExec("set @@tidb_enable_list_partition = OFF") - // Notice that this does not really test the issue #21731 - tk.MustExec("create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2));") -} - -type testOutput struct { - SQL string - Plan []string - Res []string -} - -func verifyPartitionResult(tk *testkit.TestKit, input []string, output []testOutput) { - for i, tt := range input { - var isSelect = false - if strings.HasPrefix(strings.ToLower(tt), "select ") { - isSelect = true - } - testdata.OnRecord(func() { - output[i].SQL = tt - if isSelect { - output[i].Plan = testdata.ConvertRowsToStrings(tk.UsedPartitions(tt).Rows()) - output[i].Res = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Sort().Rows()) - } else { - // Just verify SELECT (also avoid double INSERTs during record) - output[i].Res = nil - output[i].Plan = nil - } - }) - if isSelect { - tk.UsedPartitions(tt).Check(testkit.Rows(output[i].Plan...)) - tk.MustQuery(tt).Sort().Check(testkit.Rows(output[i].Res...)) - } else { - tk.MustExec(tt) - } - } -} - -func TestRangePartitionBoundariesEq(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("CREATE DATABASE TestRangePartitionBoundaries") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundaries") - tk.MustExec("USE TestRangePartitionBoundaries") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000)); -`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesNe(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("CREATE DATABASE TestRangePartitionBoundariesNe") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesNe") - tk.MustExec("USE TestRangePartitionBoundariesNe") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesBetweenM(t 
*testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenM") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenM") - tk.MustExec("USE TestRangePartitionBoundariesBetweenM") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesBetweenS(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE IF NOT EXISTS TestRangePartitionBoundariesBetweenS") - defer tk.MustExec("DROP DATABASE TestRangePartitionBoundariesBetweenS") - tk.MustExec("USE TestRangePartitionBoundariesBetweenS") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesLtM(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("create database TestRangePartitionBoundariesLtM") - defer tk.MustExec("drop database TestRangePartitionBoundariesLtM") - tk.MustExec("use TestRangePartitionBoundariesLtM") - tk.MustExec("drop table if exists t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1000000), - PARTITION p1 VALUES LESS THAN (2000000), - PARTITION p2 VALUES LESS THAN (3000000))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestRangePartitionBoundariesLtS(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec("create database TestRangePartitionBoundariesLtS") - defer tk.MustExec("drop database TestRangePartitionBoundariesLtS") - tk.MustExec("use TestRangePartitionBoundariesLtS") - tk.MustExec("drop table if exists t") - tk.MustExec(`CREATE TABLE t -(a INT, b varchar(255)) -PARTITION BY RANGE (a) ( - PARTITION p0 VALUES LESS THAN (1), - PARTITION p1 VALUES LESS THAN (2), - PARTITION p2 VALUES LESS THAN (3), - PARTITION p3 VALUES LESS THAN (4), - PARTITION p4 VALUES LESS THAN (5), - PARTITION p5 VALUES LESS THAN (6), - PARTITION p6 VALUES LESS THAN (7))`) - - var input []string - var output []testOutput - executorSuiteData.LoadTestCases(t, &input, &output) - verifyPartitionResult(tk, input, output) -} - -func TestIssue25528(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - tk.MustExec("use test") - tk.MustExec("create table issue25528 (id int primary 
key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8") - tk.MustExec("insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20')") - tk.MustExec("begin pessimistic") - tk.MustQuery("select * from issue25528 where id = 1 for update").Check(testkit.Rows("1 100.00 -100.00 2021-06-17 22:35:20")) - - tk.MustExec("drop table if exists issue25528") - tk.MustExec("CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10;") - tk.MustExec("INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4);") - tk.MustQuery("select * from issue25528 where c1 in (3, 4) order by c2 for update;").Check(testkit.Rows("3 3 3 3", "4 4 4 4")) -} - func TestIssue26251(t *testing.T) { store := testkit.CreateMockStore(t) @@ -3952,48 +2552,6 @@ func TestIssue31024(t *testing.T) { tk2.MustExec("rollback") } -func TestIssue27346(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk1 := testkit.NewTestKit(t, store) - tk1.MustExec("create database TestIssue27346") - defer tk1.MustExec("drop database TestIssue27346") - tk1.MustExec("use TestIssue27346") - - tk1.MustExec("set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic'") - - tk1.MustExec("DROP TABLE IF EXISTS `tbl_18`") - tk1.MustExec("CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3") - tk1.MustExec("INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725)") - - tk1.MustQuery("select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= 
'2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update").Sort().Check(testkit.Rows( - "1588592628 3937.1887880628115 1329207475772244999", - "1674237178 3937.1887880628115 -6459065549188938772", - "2637316689 3937.1887880628115 -4522626077860026631", - "672436701 3937.1887880628115 -7373106839136381229", - "831809724 3937.1887880628115 -4426441253940231780")) - tk1.MustQuery("select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update").Sort().Check(testkit.Rows( - "1588592628 3937.1887880628115 1329207475772244999", - "1674237178 3937.1887880628115 -6459065549188938772", - "2637316689 3937.1887880628115 -4522626077860026631", - "672436701 3937.1887880628115 -7373106839136381229", - "831809724 3937.1887880628115 -4426441253940231780")) -} - -func TestIssue35181(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("create database TestIssue35181") - tk.MustExec("use TestIssue35181") - tk.MustExec("CREATE TABLE `t` (`a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000))") - - tk.MustExec("set @@tidb_partition_prune_mode = 'static'") - tk.MustExec(`insert into t select * from t where a=3000`) - tk.MustExec("set @@tidb_partition_prune_mode = 'dynamic'") - tk.MustExec(`insert into t select * from t where a=3000`) -} - func TestIssue21732(t *testing.T) { failpoint.Enable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune", `return(true)`) defer failpoint.Disable("github.com/pingcap/tidb/pkg/planner/core/forceDynamicPrune") @@ -4224,73 +2782,3 @@ func TestGlobalIndexMerge(t *testing.T) { tk.MustQuery("select /*+ use_index_merge(t, uidx_ac, idx_bc) */ * from t where a=1 or b=2").Sort().Check( testkit.Rows("1 1 1 1", "2 2 2 2")) } - -func TestIssue39999(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - - tk.MustExec(`create schema test39999`) - tk.MustExec(`use test39999`) - tk.MustExec(`set @@tidb_opt_advanced_join_hint=0`) - tk.MustExec(`drop table if exists c, t`) - tk.MustExec("CREATE TABLE `c` (" + - "`serial_id` varchar(24)," + - "`occur_trade_date` date," + - "`txt_account_id` varchar(24)," + - "`capital_sub_class` varchar(10)," + - "`occur_amount` decimal(16,2)," + - "`broker` varchar(10)," + - "PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */," + - "KEY `idx_serial_id` (`serial_id`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci " + - "PARTITION BY RANGE COLUMNS(`serial_id`) (" + - "PARTITION `p202209` VALUES LESS THAN ('20221001')," + - "PARTITION `p202210` VALUES LESS THAN ('20221101')," + - "PARTITION `p202211` VALUES LESS THAN ('20221201')" + - ")") - - tk.MustExec("CREATE TABLE `t` ( " + - "`txn_account_id` varchar(24), " + - "`account_id` varchar(32), " + - "`broker` varchar(10), " + - "PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ " + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci") - - tk.MustExec("INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES 
('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009')") - tk.MustExec("INSERT INTO `t` VALUES ('04482786','1142927','0009')") - - tk.MustExec(`set tidb_partition_prune_mode='dynamic'`) - tk.MustExec(`analyze table c`) - tk.MustExec(`analyze table t`) - query := `select - /*+ inl_join(c) */ - c.occur_amount -from - c - join t on c.txt_account_id = t.txn_account_id - and t.broker = '0009' - and c.occur_trade_date = '2022-11-17'` - tk.MustQuery("explain " + query).Check(testkit.Rows(""+ - "IndexJoin_22 1.00 root inner join, inner:TableReader_21, outer key:test39999.t.txn_account_id, inner key:test39999.c.txt_account_id, equal cond:eq(test39999.t.txn_account_id, test39999.c.txt_account_id)", - "├─TableReader_27(Build) 1.00 root data:Selection_26", - "│ └─Selection_26 1.00 cop[tikv] eq(test39999.t.broker, \"0009\")", - "│ └─TableFullScan_25 1.00 cop[tikv] table:t keep order:false", - "└─TableReader_21(Probe) 1.00 root partition:all data:Selection_20", - " └─Selection_20 1.00 cop[tikv] eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)", - " └─TableRangeScan_19 1.00 cop[tikv] table:c range: decided by [eq(test39999.c.txt_account_id, test39999.t.txn_account_id) eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false")) - tk.MustQuery(query).Check(testkit.Rows("-2.01")) - - // Add the missing partition key part. - tk.MustExec(`alter table t add column serial_id varchar(24) default '2022111700196920'`) - query += ` and c.serial_id = t.serial_id` - tk.MustQuery(query).Check(testkit.Rows("-2.01")) - tk.MustQuery("explain " + query).Check(testkit.Rows(""+ - `IndexJoin_20 0.80 root inner join, inner:TableReader_19, outer key:test39999.t.txn_account_id, test39999.t.serial_id, inner key:test39999.c.txt_account_id, test39999.c.serial_id, equal cond:eq(test39999.t.serial_id, test39999.c.serial_id), eq(test39999.t.txn_account_id, test39999.c.txt_account_id)`, - `├─TableReader_25(Build) 0.80 root data:Selection_24`, - `│ └─Selection_24 0.80 cop[tikv] eq(test39999.t.broker, "0009"), not(isnull(test39999.t.serial_id))`, - `│ └─TableFullScan_23 1.00 cop[tikv] table:t keep order:false`, - `└─TableReader_19(Probe) 0.80 root partition:all data:Selection_18`, - ` └─Selection_18 0.80 cop[tikv] eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)`, - ` └─TableRangeScan_17 0.80 cop[tikv] table:c range: decided by [eq(test39999.c.txt_account_id, test39999.t.txn_account_id) eq(test39999.c.serial_id, test39999.t.serial_id) eq(test39999.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false`)) -} diff --git a/pkg/executor/prepared_test.go b/pkg/executor/prepared_test.go index 0cba4e45bebc8..c65e9151655ec 100644 --- a/pkg/executor/prepared_test.go +++ b/pkg/executor/prepared_test.go @@ -23,10 +23,8 @@ import ( "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/model" - "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" - "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testdata" @@ -34,75 +32,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestPreparedNameResolver(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (id int, KEY id (id))") - tk.MustExec("prepare stmt from 'select * from t 
limit ? offset ?'") - tk.MustGetErrMsg("prepare stmt from 'select b from t'", - "[planner:1054]Unknown column 'b' in 'field list'") - tk.MustGetErrMsg("prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?'", - "[planner:1054]Unknown column 'a' in 'order clause'") -} - -// a 'create table' DDL statement should be accepted if it has no parameters. -func TestPreparedDDL(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("prepare stmt from 'create table t (id int, KEY id (id))'") -} - -// TestUnsupportedStmtForPrepare is related to https://github.com/pingcap/tidb/issues/17412 -func TestUnsupportedStmtForPrepare(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`prepare stmt0 from "create table t0(a int primary key)"`) - tk.MustGetErrCode(`prepare stmt1 from "execute stmt0"`, mysql.ErrUnsupportedPs) - tk.MustGetErrCode(`prepare stmt2 from "deallocate prepare stmt0"`, mysql.ErrUnsupportedPs) - tk.MustGetErrCode(`prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'"`, mysql.ErrUnsupportedPs) -} - -func TestIgnorePlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - - tk.MustExec("create table t (id int primary key, num int)") - tk.MustExec("insert into t values (1, 1)") - tk.MustExec("insert into t values (2, 2)") - tk.MustExec("insert into t values (3, 3)") - tk.MustExec("prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?'") - tk.MustExec("set @ignore_plan_doma = 1") - tk.MustExec("execute stmt using @ignore_plan_doma") - require.False(t, tk.Session().GetSessionVars().StmtCtx.UseCache) -} - -func TestPreparedStmtWithHint(t *testing.T) { - // see https://github.com/pingcap/tidb/issues/18535 - store, dom := testkit.CreateMockStoreAndDomain(t) - sv := server.CreateMockServer(t, store) - sv.SetDomain(dom) - defer sv.Close() - - conn1 := server.CreateMockConn(t, sv) - tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) - - go dom.ExpensiveQueryHandle().SetSessionManager(sv).Run() - tk.MustExec("prepare stmt from \"select /*+ max_execution_time(100) */ sleep(10)\"") - tk.MustQuery("execute stmt").Check(testkit.Rows("1")) - - // see https://github.com/pingcap/tidb/issues/46817 - tk.MustExec("use test") - tk.MustExec("create table if not exists t (i int)") - tk.MustExec("prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;'") -} - func TestPreparedNullParam(t *testing.T) { store := testkit.CreateMockStore(t) flags := []bool{false, true} @@ -832,23 +761,6 @@ func TestPlanCacheOperators(t *testing.T) { } } -func TestIssue28782(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("prepare stmt from 'SELECT IF(?, 1, 0);';") - tk.MustExec("set @a=1, @b=null, @c=0") - - tk.MustQuery("execute stmt using @a;").Check(testkit.Rows("1")) - tk.MustQuery("execute stmt using @b;").Check(testkit.Rows("0")) - // TODO(Reminiscent): Support cache more tableDual plan. 
- tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - tk.MustQuery("execute stmt using @c;").Check(testkit.Rows("0")) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - func TestIssue29101(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -919,39 +831,6 @@ func TestIssue29101(t *testing.T) { tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) // can use the plan-cache } -func TestIssue28087And28162(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - // issue 28087 - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists IDT_26207`) - tk.MustExec(`CREATE TABLE IDT_26207 (col1 bit(1))`) - tk.MustExec(`insert into IDT_26207 values(0x0), (0x1)`) - tk.MustExec(`prepare stmt from 'select t1.col1 from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'`) - tk.MustExec(`set @a=0x01, @b=0x01, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x01")) - tk.MustExec(`set @a=0x00, @b=0x00, @c=0x01`) - tk.MustQuery(`execute stmt using @a,@b,@c`).Check(testkit.Rows("\x00", "\x01")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) - - // issue 28162 - tk.MustExec(`drop table if exists IDT_MC21780`) - tk.MustExec(`CREATE TABLE IDT_MC21780 ( - COL1 timestamp NULL DEFAULT NULL, - COL2 timestamp NULL DEFAULT NULL, - COL3 timestamp NULL DEFAULT NULL, - KEY U_M_COL (COL1,COL2) - )`) - tk.MustExec(`insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28")`) - tk.MustExec(`prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. 
col1 in (?, ?, ?);'`) - tk.MustExec(`set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows()) - tk.MustExec(`set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"`) - tk.MustQuery(`execute stmt using @a,@b,@c,@d`).Check(testkit.Rows("1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28")) - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("1")) -} - func TestParameterPushDown(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1147,27 +1026,6 @@ func TestPreparePlanCache4DifferentSystemVars(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) } -func TestTemporaryTable4PlanCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("use test") - tk.MustExec("set @@tidb_enable_collect_execution_info=0;") - tk.MustExec("drop table if exists tmp2") - tk.MustExec("create temporary table tmp2 (a int, b int, key(a), key(b));") - tk.MustExec("prepare stmt from 'select * from tmp2;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) - - tk.MustExec("drop table if exists tmp_t;") - tk.MustExec("create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows") - tk.MustExec("prepare stmt from 'select * from tmp_t;';") - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("execute stmt;").Check(testkit.Rows()) - tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0")) -} - func TestPrepareStmtAfterIsolationReadChange(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -1246,19 +1104,6 @@ func TestPreparePC4Binding(t *testing.T) { tk.MustQuery("select @@last_plan_from_binding").Check(testkit.Rows("1")) } -func TestIssue31141(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`set tidb_enable_prepared_plan_cache=1`) - tk.MustExec("set @@tidb_txn_mode = 'pessimistic'") - - // No panic here. 
- tk.MustExec("prepare stmt1 from 'do 1'") - - tk.MustExec("set @@tidb_txn_mode = 'optimistic'") - tk.MustExec("prepare stmt1 from 'do 1'") -} - func TestMaxPreparedStmtCount(t *testing.T) { oldVal := atomic.LoadInt64(&variable.PreparedStmtCount) atomic.StoreInt64(&variable.PreparedStmtCount, 0) diff --git a/pkg/executor/projection.go b/pkg/executor/projection.go index a55294239dc4b..302197fc9a44c 100644 --- a/pkg/executor/projection.go +++ b/pkg/executor/projection.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/exec" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" @@ -462,7 +463,7 @@ func (w *projectionWorker) run(ctx context.Context) { func recoveryProjection(output *projectionOutput, r interface{}) { if output != nil { - output.done <- errors.Errorf("%v", r) + output.done <- util.GetRecoverError(r) } logutil.BgLogger().Error("projection executor panicked", zap.String("error", fmt.Sprintf("%v", r)), zap.Stack("stack")) } diff --git a/pkg/executor/recover_test.go b/pkg/executor/recover_test.go index 7c678510d28bb..f56de7988bfb9 100644 --- a/pkg/executor/recover_test.go +++ b/pkg/executor/recover_test.go @@ -92,6 +92,11 @@ func TestRecoverTable(t *testing.T) { err := tk.ExecToErr(fmt.Sprintf("recover table by job %d", 10000000)) require.Error(t, err) + // recover table by zero JobID. + // related issue: https://github.com/pingcap/tidb/issues/46296 + err = tk.ExecToErr(fmt.Sprintf("recover table by job %d", 0)) + require.Error(t, err) + // Disable GC by manual first, then after recover table, the GC enable status should also be disabled. require.NoError(t, gcutil.DisableGC(tk.Session())) diff --git a/pkg/executor/revoke.go b/pkg/executor/revoke.go index 3aaed4377f905..dbeba1320987e 100644 --- a/pkg/executor/revoke.go +++ b/pkg/executor/revoke.go @@ -33,6 +33,7 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) @@ -221,12 +222,12 @@ func (e *RevokeExec) revokeGlobalPriv(internalSession sessionctx.Context, priv * } } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) err := composeGlobalPrivUpdate(sql, priv.Priv, "N") if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, strings.ToLower(host)) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%?", user, strings.ToLower(host)) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err @@ -240,12 +241,12 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.DBTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.DBTable) err := composeDBPrivUpdate(sql, priv.Priv, "N") if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%?", userName, host, dbName) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? 
AND DB=%?", userName, host, dbName) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { @@ -253,10 +254,10 @@ func (e *RevokeExec) revokeDBPriv(internalSession sessionctx.Context, priv *ast. } sql = new(strings.Builder) - sqlexec.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%?", mysql.SystemDB, mysql.DBTable, userName, host, dbName) + sqlescape.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%?", mysql.SystemDB, mysql.DBTable, userName, host, dbName) for _, v := range append(mysql.AllDBPrivs, mysql.GrantPriv) { - sqlexec.MustFormatSQL(sql, " AND %n='N'", v.ColumnString()) + sqlescape.MustFormatSQL(sql, " AND %n='N'", v.ColumnString()) } _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) return err @@ -275,13 +276,13 @@ func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *a tblName = tbl.Meta().Name.O } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.TablePrivTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.TablePrivTable) isDelRow, err := composeTablePrivUpdateForRevoke(internalSession, sql, priv.Priv, user, host, dbName, tblName) if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user, host, dbName, tblName) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", user, host, dbName, tblName) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err @@ -289,7 +290,7 @@ func (e *RevokeExec) revokeTablePriv(internalSession sessionctx.Context, priv *a if isDelRow { sql.Reset() - sqlexec.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", mysql.SystemDB, mysql.TablePrivTable, user, host, dbName, tblName) + sqlescape.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%?", mysql.SystemDB, mysql.TablePrivTable, user, host, dbName, tblName) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) } return err @@ -309,12 +310,12 @@ func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv * } sql.Reset() - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.ColumnPrivTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.ColumnPrivTable) isDelRow, err := composeColumnPrivUpdateForRevoke(internalSession, sql, priv.Priv, user, host, dbName, tbl.Meta().Name.O, col.Name.O) if err != nil { return err } - sqlexec.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user, host, dbName, tbl.Meta().Name.O, col.Name.O) + sqlescape.MustFormatSQL(sql, " WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", user, host, dbName, tbl.Meta().Name.O, col.Name.O) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { @@ -323,7 +324,7 @@ func (e *RevokeExec) revokeColumnPriv(internalSession sessionctx.Context, priv * if isDelRow { sql.Reset() - sqlexec.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? AND Column_name=%?", mysql.SystemDB, mysql.ColumnPrivTable, user, host, dbName, tbl.Meta().Name.O, col.Name.O) + sqlescape.MustFormatSQL(sql, "DELETE FROM %n.%n WHERE User=%? AND Host=%? AND DB=%? AND Table_name=%? 
AND Column_name=%?", mysql.SystemDB, mysql.ColumnPrivTable, user, host, dbName, tbl.Meta().Name.O, col.Name.O) _, err = internalSession.(sqlexec.SQLExecutor).ExecuteInternal(ctx, sql.String()) if err != nil { return err @@ -374,7 +375,7 @@ func composeTablePrivUpdateForRevoke(ctx sessionctx.Context, sql *strings.Builde return false, err } } - sqlexec.MustFormatSQL(sql, `Table_priv=%?, Column_priv=%?, Grantor=%?`, strings.Join(newTablePriv, ","), strings.Join(newColumnPriv, ","), ctx.GetSessionVars().User.String()) + sqlescape.MustFormatSQL(sql, `Table_priv=%?, Column_priv=%?, Grantor=%?`, strings.Join(newTablePriv, ","), strings.Join(newColumnPriv, ","), ctx.GetSessionVars().User.String()) return len(newTablePriv) == 0, nil } @@ -394,6 +395,6 @@ func composeColumnPrivUpdateForRevoke(ctx sessionctx.Context, sql *strings.Build } } - sqlexec.MustFormatSQL(sql, `Column_priv=%?`, strings.Join(newColumnPriv, ",")) + sqlescape.MustFormatSQL(sql, `Column_priv=%?`, strings.Join(newColumnPriv, ",")) return len(newColumnPriv) == 0, nil } diff --git a/pkg/executor/revoke_test.go b/pkg/executor/revoke_test.go index 0cfb9e006c5d7..e8dbaf2836a31 100644 --- a/pkg/executor/revoke_test.go +++ b/pkg/executor/revoke_test.go @@ -21,9 +21,7 @@ import ( "github.com/pingcap/tidb/pkg/executor" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) @@ -177,132 +175,3 @@ func TestRevokeColumnScope(t *testing.T) { rows := tk.MustQuery(`SELECT Column_priv FROM mysql.Columns_priv WHERE User="testCol1Revoke" and host="localhost" and db="test" and Table_name="test3"`).Rows() require.Len(t, rows, 0) } - -// ref issue #38421 -func TestRevokeTableSingle(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - // Create a new user. - tk.MustExec(`CREATE USER test;`) - tk.MustExec(`CREATE TABLE test.test1(c1 int);`) - tk.MustExec(`GRANT SELECT ON test.test1 TO test;`) - - tk.MustExec(`REVOKE SELECT ON test.test1 from test;`) - - rows := tk.MustQuery(`SELECT Column_priv FROM mysql.tables_priv WHERE User="test" `).Rows() - require.Len(t, rows, 0) -} - -// ref issue #38421(column fix) -func TestRevokeTableSingleColumn(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - // Create a new user. 
- tk.MustExec(`CREATE USER test;`) - tk.MustExec(`GRANT SELECT(Host) ON mysql.db TO test`) - tk.MustExec(`GRANT SELECT(DB) ON mysql.db TO test`) - tk.MustExec(`REVOKE SELECT(Host) ON mysql.db FROM test`) - - rows := tk.MustQuery(`SELECT Column_priv FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' `).Rows() - require.Len(t, rows, 0) - rows = tk.MustQuery(`SELECT Column_priv FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' `).Rows() - require.Len(t, rows, 1) -} - -func TestRevokeDynamicPrivs(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("DROP USER if exists dyn") - tk.MustExec("create user dyn") - - tk.MustExec("GRANT BACKUP_Admin ON *.* TO dyn") // grant one priv - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N")) - - // try revoking only on test.* - should fail: - _, err := tk.Exec("REVOKE BACKUP_Admin,system_variables_admin ON test.* FROM dyn") - require.True(t, terror.ErrorEqual(err, exeerrors.ErrIllegalPrivilegeLevel)) - - // privs should still be intact: - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N")) - // with correct usage, the privilege is revoked - tk.MustExec("REVOKE BACKUP_Admin ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // Revoke bogus is a warning in MySQL - tk.MustExec("REVOKE bogus ON *.* FROM dyn") - tk.MustQuery("SHOW WARNINGS").Check(testkit.Rows("Warning 3929 Dynamic privilege 'BOGUS' is not registered with the server.")) - - // grant and revoke two dynamic privileges at once. 
- tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % BACKUP_ADMIN N", "dyn % SYSTEM_VARIABLES_ADMIN N")) - tk.MustExec("REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // revoke a combination of dynamic + non-dynamic - tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn") - tk.MustExec("REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows()) - - // revoke grant option from privileges - tk.MustExec("GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION") - tk.MustExec("REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn") - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option").Check(testkit.Rows("dyn % SYSTEM_VARIABLES_ADMIN Y")) -} - -func TestRevokeOnNonExistTable(t *testing.T) { - // issue #28533 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("CREATE DATABASE d1;") - defer tk.MustExec("DROP DATABASE IF EXISTS d1;") - tk.MustExec("USE d1;") - tk.MustExec("CREATE TABLE t1 (a int)") - defer tk.MustExec("DROP TABLE IF EXISTS t1") - tk.MustExec("CREATE USER issue28533") - defer tk.MustExec("DROP USER issue28533") - - // GRANT ON existent table success - tk.MustExec("GRANT ALTER ON d1.t1 TO issue28533;") - // GRANT ON non-existent table success - tk.MustExec("GRANT INSERT, CREATE ON d1.t2 TO issue28533;") - - // REVOKE ON non-existent table success - tk.MustExec("DROP TABLE t1;") - tk.MustExec("REVOKE ALTER ON d1.t1 FROM issue28533;") -} - -// Check https://github.com/pingcap/tidb/issues/41773. -func TestIssue41773(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table if not exists xx (id int)") - tk.MustExec("CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';") - tk.MustExec("GRANT USAGE ON * TO 't1234'@'%';") - tk.MustExec("GRANT USAGE ON test.* TO 't1234'@'%';") - tk.MustExec("GRANT USAGE ON test.xx TO 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON * FROM 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON test.* FROM 't1234'@'%';") - tk.MustExec("REVOKE USAGE ON test.xx FROM 't1234'@'%';") -} - -// Check https://github.com/pingcap/tidb/issues/41048 -func TestCaseInsensitiveSchemaNames(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`CREATE TABLE test.TABLE_PRIV(id int, name varchar(20));`) - // Verify the case-insensitive updates for mysql.tables_priv table. - tk.MustExec(`GRANT SELECT ON test.table_priv TO 'root'@'%';`) - tk.MustExec(`revoke SELECT ON test.TABLE_PRIV from 'root'@'%';;`) - - // Verify the case-insensitive updates for mysql.db table. - tk.MustExec(`GRANT SELECT ON test.* TO 'root'@'%';`) - tk.MustExec(`revoke SELECT ON tESt.* from 'root'@'%';;`) - - // Verify the case-insensitive updates for mysql.columns_priv table. 
- tk.MustExec(`GRANT SELECT (id), INSERT (ID, name) ON tEst.TABLE_PRIV TO 'root'@'%';`) - tk.MustExec(`REVOKE SELECT (ID) ON test.taBle_priv from 'root'@'%';;`) -} diff --git a/pkg/executor/sample_test.go b/pkg/executor/sample_test.go index 4a2a110f35367..bd3a477807ec2 100644 --- a/pkg/executor/sample_test.go +++ b/pkg/executor/sample_test.go @@ -20,7 +20,6 @@ import ( "testing" "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" @@ -118,157 +117,6 @@ func TestTableSamplePlan(t *testing.T) { require.Regexp(t, ".*TableSample.*", tableSample) } -func TestTableSampleSchema(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - // Clustered index - tk.MustExec("create table t (a varchar(255) primary key, b bigint);") - tk.MustExec("insert into t values ('b', 100), ('y', 100);") - tk.MustQuery("split table t between ('a') and ('z') regions 2;").Check(testkit.Rows("1 1")) - tk.MustQuery("select a from t tablesample regions();").Check(testkit.Rows("b", "y")) - - tk.MustExec("drop table t;") - tk.MustExec("create table t (a varchar(255), b int, c decimal, primary key (a, b, c));") - tk.MustQuery("split table t between ('a', 0, 0) and ('z', 100, 100) regions 2;").Check(testkit.Rows("1 1")) - tk.MustExec("insert into t values ('b', 10, 100), ('y', 100, 10);") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("b 10 100", "y 100 10")) - - // PKIsHandle - tk.MustExec("drop table t;") - tk.MustExec("create table t (a bigint primary key, b int default 10);") - tk.MustQuery("split table t between (1) and (100000) regions 4;").Check(testkit.Rows("3 1")) - tk.MustExec("insert into t(a) values (200), (25600), (50300), (99900), (99901)") - tk.MustQuery("select a from t tablesample regions();").Check(testkit.Rows("200", "25600", "50300", "99900")) - - // _tidb_rowid - tk.MustExec("drop table t;") - tk.MustExec("create table t (a bigint, b int default 10);") - tk.MustQuery("split table t between (0) and (100000) regions 4;").Check(testkit.Rows("3 1")) - tk.MustExec("insert into t(a) values (1), (2), (3);") - tk.MustQuery("select a from t tablesample regions();").Check(testkit.Rows("1")) -} - -func TestTableSampleInvalid(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int, b varchar(255));") - tk.MustExec("insert into t values (1, 'abc');") - tk.MustExec("create view v as select * from t;") - tk.MustGetErrCode("select * from v tablesample regions();", errno.ErrInvalidTableSample) - tk.MustGetErrCode("select * from information_schema.tables tablesample regions();", errno.ErrInvalidTableSample) - - tk.MustGetErrCode("select a from t tablesample system();", errno.ErrInvalidTableSample) - tk.MustGetErrCode("select a from t tablesample bernoulli(10 percent);", errno.ErrInvalidTableSample) - tk.MustGetErrCode("select a from t as t1 tablesample regions(), t as t2 tablesample system();", errno.ErrInvalidTableSample) - tk.MustGetErrCode("select a from t tablesample ();", errno.ErrInvalidTableSample) -} - -func TestTableSampleWithTiDBRowID(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int, b varchar(255));") - tk.MustExec("insert into t values (1, 'abc');") - tk.MustQuery("select 
_tidb_rowid from t tablesample regions();").Check(testkit.Rows("1")) - tk.MustQuery("select a, _tidb_rowid from t tablesample regions();").Check(testkit.Rows("1 1")) - tk.MustQuery("select _tidb_rowid, b from t tablesample regions();").Check(testkit.Rows("1 abc")) - tk.MustQuery("select b, _tidb_rowid, a from t tablesample regions();").Check(testkit.Rows("abc 1 1")) -} - -func TestTableSampleWithPartition(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int, b varchar(255), primary key (a)) partition by hash(a) partitions 2;") - tk.MustExec("insert into t values (1, '1'), (2, '2'), (3, '3');") - rows := tk.MustQuery("select * from t tablesample regions();").Rows() - require.Len(t, rows, 2) - - tk.MustExec("delete from t;") - tk.MustExec("insert into t values (1, '1');") - rows = tk.MustQuery("select * from t partition (p0) tablesample regions();").Rows() - require.Len(t, rows, 0) - rows = tk.MustQuery("select * from t partition (p1) tablesample regions();").Rows() - require.Len(t, rows, 1) - - // Test https://github.com/pingcap/tidb/issues/27349. - tk.MustExec("drop table if exists t;") - tk.MustExec(`create table t (a int, b int, unique key idx(a)) partition by range (a) ( - partition p0 values less than (0), - partition p1 values less than (10), - partition p2 values less than (30), - partition p3 values less than (maxvalue));`) - tk.MustExec("insert into t values (2, 2), (31, 31), (12, 12);") - tk.MustQuery("select _tidb_rowid from t tablesample regions() order by _tidb_rowid;"). - Check(testkit.Rows("1", "2", "3")) // The order of _tidb_rowid should be correct. -} - -func TestTableSampleGeneratedColumns(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int primary key, b int as (a + 1), c int as (b + 1), d int as (c + 1));") - tk.MustQuery("split table t between (0) and (10000) regions 4;").Check(testkit.Rows("3 1")) - tk.MustExec("insert into t(a) values (1), (2), (2999), (4999), (9999);") - tk.MustQuery("select a from t tablesample regions()").Check(testkit.Rows("1", "2999", "9999")) - tk.MustQuery("select c from t tablesample regions()").Check(testkit.Rows("3", "3001", "10001")) - tk.MustQuery("select a, b from t tablesample regions()").Check( - testkit.Rows("1 2", "2999 3000", "9999 10000")) - tk.MustQuery("select d, c from t tablesample regions()").Check( - testkit.Rows("4 3", "3002 3001", "10002 10001")) - tk.MustQuery("select a, d from t tablesample regions()").Check( - testkit.Rows("1 4", "2999 3002", "9999 10002")) -} - -func TestTableSampleUnionScanIgnorePendingKV(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int primary key);") - tk.MustQuery("split table t between (0) and (40000) regions 4;").Check(testkit.Rows("3 1")) - tk.MustExec("insert into t values (1), (1000), (10002);") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002")) - - tk.MustExec("begin;") - tk.MustExec("insert into t values (20006), (50000);") - // The memory DB values in transactions are ignored. - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002")) - tk.MustExec("delete from t where a = 1;") - // The memory DB values in transactions are ignored. 
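The TABLESAMPLE tests deleted above exercise two invariants worth distilling: REGIONS() returns at most one representative row per non-empty region, and it samples committed data only. A compressed sketch of both, reusing createSampleTestkit from this file; the table name is illustrative and the split/row values mirror the deleted setups:

func sketchTableSampleRegions(t *testing.T) {
	store := testkit.CreateMockStore(t)
	tk := createSampleTestkit(t, store)
	tk.MustExec("create table s (a int primary key)")
	tk.MustQuery("split table s between (0) and (40000) regions 4").Check(testkit.Rows("3 1"))
	tk.MustExec("insert into s values (1), (10002)")
	// At most one row per non-empty region.
	tk.MustQuery("select * from s tablesample regions()").Check(testkit.Rows("1", "10002"))
	tk.MustExec("begin")
	tk.MustExec("insert into s values (20006)")
	// Uncommitted writes in the transaction's memory buffer are not sampled.
	tk.MustQuery("select * from s tablesample regions()").Check(testkit.Rows("1", "10002"))
	tk.MustExec("rollback")
}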
- tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002")) - tk.MustExec("commit;") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1000", "10002", "20006", "50000")) -} - -func TestTableSampleTransactionConsistency(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk2 := createSampleTestkit(t, store) - - tk.MustExec("create table t (a int primary key);") - tk.MustQuery("split table t between (0) and (40000) regions 4;").Check(testkit.Rows("3 1")) - tk.MustExec("insert into t values (1), (1000), (10002);") - - tk.MustExec("begin;") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002")) - tk2.MustExec("insert into t values (20006), (50000);") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002")) - tk.MustExec("commit;") - tk.MustQuery("select * from t tablesample regions();").Check(testkit.Rows("1", "10002", "20006", "50000")) -} - -func TestTableSampleNotSupportedPlanWarning(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := createSampleTestkit(t, store) - tk.MustExec("create table t (a int primary key, b int, c varchar(255));") - tk.MustQuery("split table t between (0) and (10000) regions 5;").Check(testkit.Rows("4 1")) - tk.MustExec("insert into t values (1000, 1, '1'), (1001, 1, '1'), (2100, 2, '2'), (4500, 3, '3');") - - tk.MustExec("create index idx_0 on t (b);") - tk.MustQuery("select a from t tablesample regions() order by a;").Check( - testkit.Rows("1000", "2100", "4500")) - tk.MustQuery("select a from t use index (idx_0) tablesample regions() order by a;").Check( - testkit.Rows("1000", "1001", "2100", "4500")) - tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 8128 Invalid TABLESAMPLE: plan not supported")) -} - func TestMaxChunkSize(t *testing.T) { store := testkit.CreateMockStore(t) tk := createSampleTestkit(t, store) diff --git a/pkg/executor/set.go b/pkg/executor/set.go index aa6cfaf796d3d..27288b32f6c63 100644 --- a/pkg/executor/set.go +++ b/pkg/executor/set.go @@ -166,9 +166,7 @@ func (e *SetExecutor) setSysVariable(ctx context.Context, name string, v *expres dom := domain.GetDomain(e.Ctx()) serverID := disttaskutil.GenerateSubtaskExecID(ctx, dom.DDL().GetID()) _, err = e.Ctx().(sqlexec.SQLExecutor).ExecuteInternal(ctx, - `update mysql.dist_framework_meta - set role = %? 
- where host = %?`, valStr, serverID) + `replace into mysql.dist_framework_meta values(%?, %?, DEFAULT)`, serverID, valStr) } return err } diff --git a/pkg/executor/show.go b/pkg/executor/show.go index 9875cfa117ced..cc9fe3831d5d1 100644 --- a/pkg/executor/show.go +++ b/pkg/executor/show.go @@ -2262,7 +2262,7 @@ func fillOneImportJobInfo(info *importer.JobInfo, result *chunk.Chunk, importedR result.AppendInt64(3, info.TableID) result.AppendString(4, info.Step) result.AppendString(5, info.Status) - result.AppendString(6, units.HumanSize(float64(info.SourceFileSize))) + result.AppendString(6, units.BytesSize(float64(info.SourceFileSize))) if info.Summary != nil { result.AppendUint64(7, info.Summary.ImportedRows) } else if importedRowCount >= 0 { diff --git a/pkg/executor/shuffle.go b/pkg/executor/shuffle.go index c1b9a4a0faecd..313111f6d6853 100644 --- a/pkg/executor/shuffle.go +++ b/pkg/executor/shuffle.go @@ -25,6 +25,7 @@ import ( "github.com/pingcap/tidb/pkg/executor/internal/vecgroupchecker" "github.com/pingcap/tidb/pkg/expression" "github.com/pingcap/tidb/pkg/sessionctx" + "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/channel" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/execdetails" @@ -254,8 +255,8 @@ func (e *ShuffleExec) Next(ctx context.Context, req *chunk.Chunk) error { } func recoveryShuffleExec(output chan *shuffleOutput, r interface{}) { - err := errors.Errorf("%v", r) - output <- &shuffleOutput{err: errors.Errorf("%v", r)} + err := util.GetRecoverError(r) + output <- &shuffleOutput{err: util.GetRecoverError(r)} logutil.BgLogger().Error("shuffle panicked", zap.Error(err), zap.Stack("stack")) } diff --git a/pkg/executor/simple.go b/pkg/executor/simple.go index 748abf88b17b4..03d3675dadbb4 100644 --- a/pkg/executor/simple.go +++ b/pkg/executor/simple.go @@ -62,6 +62,7 @@ import ( "github.com/pingcap/tidb/pkg/util/logutil" pwdValidator "github.com/pingcap/tidb/pkg/util/password-validation" "github.com/pingcap/tidb/pkg/util/sem" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/pkg/util/timeutil" "github.com/pingcap/tidb/pkg/util/tls" @@ -218,7 +219,7 @@ func (e *SimpleExec) setDefaultRoleNone(s *ast.SetDefaultRoleStmt) error { u.Hostname = "%" } sql.Reset() - sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", u.Username, u.Hostname) + sqlescape.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", u.Username, u.Hostname) if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { @@ -269,7 +270,7 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul user.Hostname = "%" } sql.Reset() - sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) + sqlescape.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? 
AND HOST=%?;", user.Username, user.Hostname) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { @@ -287,7 +288,7 @@ func (e *SimpleExec) setDefaultRoleRegular(ctx context.Context, s *ast.SetDefaul return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(role.String(), user.String()) } sql.Reset() - sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values(%?, %?, %?, %?);", user.Hostname, user.Username, role.Hostname, role.Username) + sqlescape.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values(%?, %?, %?, %?);", user.Hostname, user.Username, role.Hostname, role.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { @@ -329,7 +330,7 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol user.Hostname = "%" } sql.Reset() - sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) + sqlescape.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { @@ -338,7 +339,7 @@ func (e *SimpleExec) setDefaultRoleAll(ctx context.Context, s *ast.SetDefaultRol return err } sql.Reset() - sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? AND TO_USER=%?;", user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? AND TO_USER=%?;", user.Hostname, user.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); rollbackErr != nil { @@ -372,7 +373,7 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) + sqlescape.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, rollbackErr := sqlExecutor.ExecuteInternal(ctx, "rollback"); rollbackErr != nil { @@ -384,20 +385,20 @@ func (e *SimpleExec) setDefaultRoleForCurrentUser(s *ast.SetDefaultRoleStmt) (er sql.Reset() switch s.SetRoleOpt { case ast.SetRoleNone: - sqlexec.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? 
AND HOST=%?;", user.Username, user.Hostname) + sqlescape.MustFormatSQL(sql, "DELETE IGNORE FROM mysql.default_roles WHERE USER=%? AND HOST=%?;", user.Username, user.Hostname) case ast.SetRoleAll: - sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? AND TO_USER=%?;", user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles(HOST,USER,DEFAULT_ROLE_HOST,DEFAULT_ROLE_USER) SELECT TO_HOST,TO_USER,FROM_HOST,FROM_USER FROM mysql.role_edges WHERE TO_HOST=%? AND TO_USER=%?;", user.Hostname, user.Username) case ast.SetRoleRegular: - sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values") + sqlescape.MustFormatSQL(sql, "INSERT IGNORE INTO mysql.default_roles values") for i, role := range s.RoleList { if i > 0 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } ok := checker.FindEdge(e.Ctx(), role, user) if !ok { return exeerrors.ErrRoleNotGranted.GenWithStackByArgs(role.String(), user.String()) } - sqlexec.MustFormatSQL(sql, "(%?, %?, %?, %?)", user.Hostname, user.Username, role.Hostname, role.Username) + sqlescape.MustFormatSQL(sql, "(%?, %?, %?, %?)", user.Hostname, user.Username, role.Hostname, role.Username) } } @@ -727,7 +728,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm role.Hostname = "%" } sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE FROM_HOST=%? and FROM_USER=%? and TO_HOST=%? and TO_USER=%?`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE FROM_HOST=%? and FROM_USER=%? and TO_HOST=%? and TO_USER=%?`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) @@ -736,7 +737,7 @@ func (e *SimpleExec) executeRevokeRole(ctx context.Context, s *ast.RevokeRoleStm } sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE DEFAULT_ROLE_HOST=%? and DEFAULT_ROLE_USER=%? and HOST=%? and USER=%?`, mysql.SystemDB, mysql.DefaultRoleTable, role.Hostname, role.Username, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE IGNORE FROM %n.%n WHERE DEFAULT_ROLE_HOST=%? and DEFAULT_ROLE_USER=%? and HOST=%? and USER=%?`, mysql.SystemDB, mysql.DefaultRoleTable, role.Hostname, role.Username, user.Hostname, user.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { return errors.Trace(err) @@ -935,7 +936,7 @@ func readPasswordLockingInfo(ctx context.Context, sqlExecutor sqlexec.SQLExecuto containsNoOthers: false, } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT JSON_UNQUOTE(JSON_EXTRACT(user_attributes, '$.Password_locking.failed_login_attempts')), + sqlescape.MustFormatSQL(sql, `SELECT JSON_UNQUOTE(JSON_EXTRACT(user_attributes, '$.Password_locking.failed_login_attempts')), JSON_UNQUOTE(JSON_EXTRACT(user_attributes, '$.Password_locking.password_lock_time_days')), JSON_LENGTH(JSON_REMOVE(user_attributes, '$.Password_locking')) FROM %n.%n WHERE User=%? 
AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) @@ -999,11 +1000,11 @@ func deletePasswordLockingAttribute(ctx context.Context, sqlExecutor sqlexec.SQL sql := new(strings.Builder) if alterUser.containsNoOthers { // If we use JSON_REMOVE(user_attributes, '$.Password_locking') directly here, the result is not compatible with MySQL. - sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET user_attributes=NULL`, mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, `UPDATE %n.%n SET user_attributes=NULL`, mysql.SystemDB, mysql.UserTable) } else { - sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET user_attributes=JSON_REMOVE(user_attributes, '$.Password_locking') `, mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, `UPDATE %n.%n SET user_attributes=JSON_REMOVE(user_attributes, '$.Password_locking') `, mysql.SystemDB, mysql.UserTable) } - sqlexec.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", host, name) + sqlescape.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", host, name) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) return err } @@ -1111,9 +1112,9 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm sqlTemplate := "INSERT INTO %n.%n (Host, User, authentication_string, plugin, user_attributes, Account_locked, Token_issuer, Password_expired, Password_lifetime, Password_reuse_time, Password_reuse_history) VALUES " valueTemplate := "(%?, %?, %?, %?, %?, %?, %?, %?, %?" - sqlexec.MustFormatSQL(sql, sqlTemplate, mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, sqlTemplate, mysql.SystemDB, mysql.UserTable) if savePasswdHistory { - sqlexec.MustFormatSQL(sqlPasswordHistory, `INSERT INTO %n.%n (Host, User, Password) VALUES `, mysql.SystemDB, mysql.PasswordHistoryTable) + sqlescape.MustFormatSQL(sqlPasswordHistory, `INSERT INTO %n.%n (Host, User, Password) VALUES `, mysql.SystemDB, mysql.PasswordHistoryTable) } users := make([]*auth.UserIdentity, 0, len(s.Specs)) @@ -1128,7 +1129,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm return exeerrors.ErrWrongStringLength.GenWithStackByArgs(spec.User.Hostname, "host name", auth.HostNameMaxLength) } if len(users) > 0 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } exists, err1 := userExists(ctx, e.Ctx(), spec.User.Username, spec.User.Hostname) if err1 != nil { @@ -1183,30 +1184,30 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm } hostName := strings.ToLower(spec.User.Hostname) - sqlexec.MustFormatSQL(sql, valueTemplate, hostName, spec.User.Username, pwd, authPlugin, userAttributesStr, plOptions.lockAccount, recordTokenIssuer, plOptions.passwordExpired, plOptions.passwordLifetime) + sqlescape.MustFormatSQL(sql, valueTemplate, hostName, spec.User.Username, pwd, authPlugin, userAttributesStr, plOptions.lockAccount, recordTokenIssuer, plOptions.passwordExpired, plOptions.passwordLifetime) // add Password_reuse_time value. if plOptions.passwordReuseIntervalChange && (plOptions.passwordReuseInterval != notSpecified) { - sqlexec.MustFormatSQL(sql, `, %?`, plOptions.passwordReuseInterval) + sqlescape.MustFormatSQL(sql, `, %?`, plOptions.passwordReuseInterval) } else { - sqlexec.MustFormatSQL(sql, `, %?`, nil) + sqlescape.MustFormatSQL(sql, `, %?`, nil) } // add Password_reuse_history value. 
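Stepping back to the projection.go and shuffle.go hunks earlier: both now funnel recovered panic values through util.GetRecoverError instead of errors.Errorf("%v", r). A sketch of the pattern, assuming GetRecoverError returns the value unchanged when it is already an error and only wraps non-error values:

// runGuarded shows the recovery shape now shared by the projection and
// shuffle workers; util is github.com/pingcap/tidb/pkg/util, as imported
// by those hunks.
func runGuarded(done chan<- error, body func()) {
	defer func() {
		if r := recover(); r != nil {
			// errors.Errorf("%v", r) would flatten r to a string and erase
			// its type; GetRecoverError is assumed to preserve it, so
			// callers can still match with errors.Is / errors.As.
			done <- util.GetRecoverError(r)
		}
	}()
	body()
	done <- nil
}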
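On the show.go hunk earlier: in docker/go-units, HumanSize formats with decimal prefixes (base 1000: kB, MB) while BytesSize uses binary ones (base 1024: KiB, MiB), so SHOW IMPORT JOBS now reports source file sizes in binary units. A standalone illustration:

package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanSize(2048)) // decimal: "2.048kB"
	fmt.Println(units.BytesSize(2048)) // binary:  "2KiB"
}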
if plOptions.passwordHistoryChange && (plOptions.passwordHistory != notSpecified) { - sqlexec.MustFormatSQL(sql, `, %?`, plOptions.passwordHistory) + sqlescape.MustFormatSQL(sql, `, %?`, plOptions.passwordHistory) } else { - sqlexec.MustFormatSQL(sql, `, %?`, nil) + sqlescape.MustFormatSQL(sql, `, %?`, nil) } - sqlexec.MustFormatSQL(sql, `)`) + sqlescape.MustFormatSQL(sql, `)`) // The empty password does not count in the password history and is subject to reuse at any time. // AuthTiDBAuthToken is the token login method on the cloud, // and the Password Reuse Policy does not take effect. if savePasswdHistory && len(pwd) != 0 && !strings.EqualFold(authPlugin, mysql.AuthTiDBAuthToken) { if !passwordInit { - sqlexec.MustFormatSQL(sqlPasswordHistory, ",") + sqlescape.MustFormatSQL(sqlPasswordHistory, ",") } else { passwordInit = false } - sqlexec.MustFormatSQL(sqlPasswordHistory, `( %?, %?, %?)`, hostName, spec.User.Username, pwd) + sqlescape.MustFormatSQL(sqlPasswordHistory, `( %?, %?, %?)`, hostName, spec.User.Username, pwd) } users = append(users, spec.User) } @@ -1245,12 +1246,12 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm if len(privData) != 0 { sql.Reset() - sqlexec.MustFormatSQL(sql, "INSERT IGNORE INTO %n.%n (Host, User, Priv) VALUES ", mysql.SystemDB, mysql.GlobalPrivTable) + sqlescape.MustFormatSQL(sql, "INSERT IGNORE INTO %n.%n (Host, User, Priv) VALUES ", mysql.SystemDB, mysql.GlobalPrivTable) for i, user := range users { if i > 0 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } - sqlexec.MustFormatSQL(sql, `(%?, %?, %?)`, user.Hostname, user.Username, string(hack.String(privData))) + sqlescape.MustFormatSQL(sql, `(%?, %?, %?)`, user.Hostname, user.Username, string(hack.String(privData))) } _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()) if err != nil { @@ -1269,7 +1270,7 @@ func (e *SimpleExec) executeCreateUser(ctx context.Context, s *ast.CreateUserStm func getUserPasswordLimit(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, name string, host string, plOptions *passwordOrLockOptionsInfo) (pRI *passwordReuseInfo, err error) { res := &passwordReuseInfo{notSpecified, notSpecified} sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT Password_reuse_history,Password_reuse_time FROM %n.%n WHERE User=%? AND Host=%?;`, + sqlescape.MustFormatSQL(sql, `SELECT Password_reuse_history,Password_reuse_time FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) // Query the specified user password reuse rules. recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) @@ -1341,7 +1342,7 @@ func deleteHistoricalData(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, if passwordReuse.passwordReuseInterval == 0 { deleteTemplate := `DELETE from %n.%n WHERE User= %? AND Host= %? order by Password_timestamp ASC LIMIT ` deleteTemplate = deleteTemplate + strconv.FormatInt(maxDelRows, 10) - sqlexec.MustFormatSQL(sql, deleteTemplate, mysql.SystemDB, mysql.PasswordHistoryTable, + sqlescape.MustFormatSQL(sql, deleteTemplate, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { @@ -1353,7 +1354,7 @@ func deleteHistoricalData(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, deleteTemplate := `DELETE from %n.%n WHERE User= %? AND Host= %? AND Password_timestamp < %? 
order by Password_timestamp ASC LIMIT ` deleteTemplate = deleteTemplate + strconv.FormatInt(maxDelRows, 10) sql.Reset() - sqlexec.MustFormatSQL(sql, deleteTemplate, mysql.SystemDB, mysql.PasswordHistoryTable, + sqlescape.MustFormatSQL(sql, deleteTemplate, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), beforeDate) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { @@ -1368,7 +1369,7 @@ func addHistoricalData(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, use return nil } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `INSERT INTO %n.%n (Host, User, Password) VALUES (%?, %?, %?) `, mysql.SystemDB, mysql.PasswordHistoryTable, strings.ToLower(userDetail.host), userDetail.user, userDetail.pwd) + sqlescape.MustFormatSQL(sql, `INSERT INTO %n.%n (Host, User, Password) VALUES (%?, %?, %?) `, mysql.SystemDB, mysql.PasswordHistoryTable, strings.ToLower(userDetail.host), userDetail.user, userDetail.pwd) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return errors.Trace(err) @@ -1396,7 +1397,7 @@ func checkPasswordsMatch(rows []chunk.Row, oldPwd, authPlugin string) (bool, err func getUserPasswordNum(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, userDetail *userInfo) (deleteNum int64, err error) { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) + sqlescape.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return 0, err @@ -1422,7 +1423,7 @@ func fullRecordCheck(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, userD switch authPlugin { case mysql.AuthNativePassword, "": sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User= %? AND Host= %? AND Password = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), userDetail.pwd) + sqlescape.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User= %? AND Host= %? AND Password = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), userDetail.pwd) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -1442,7 +1443,7 @@ func fullRecordCheck(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, userD return false, nil case mysql.AuthCachingSha2Password, mysql.AuthTiDBSM3Password: sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT Password FROM %n.%n WHERE User= %? AND Host= %? ;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) + sqlescape.MustFormatSQL(sql, `SELECT Password FROM %n.%n WHERE User= %? AND Host= %? ;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -1470,7 +1471,7 @@ func checkPasswordHistoryRule(ctx context.Context, sqlExecutor sqlexec.SQLExecut checkRows := `SELECT count(*) FROM (SELECT Password FROM %n.%n WHERE User=%? AND Host=%? 
ORDER BY Password_timestamp DESC LIMIT ` checkRows = checkRows + strconv.FormatInt(passwordReuse.passwordHistory, 10) checkRows = checkRows + ` ) as t where t.Password = %? ` - sqlexec.MustFormatSQL(sql, checkRows, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), userDetail.pwd) + sqlescape.MustFormatSQL(sql, checkRows, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), userDetail.pwd) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -1492,7 +1493,7 @@ func checkPasswordHistoryRule(ctx context.Context, sqlExecutor sqlexec.SQLExecut sql := new(strings.Builder) checkRows := `SELECT Password FROM %n.%n WHERE User=%? AND Host=%? ORDER BY Password_timestamp DESC LIMIT ` checkRows = checkRows + strconv.FormatInt(passwordReuse.passwordHistory, 10) - sqlexec.MustFormatSQL(sql, checkRows, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) + sqlescape.MustFormatSQL(sql, checkRows, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host)) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -1518,7 +1519,7 @@ func checkPasswordTimeRule(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, switch authPlugin { case mysql.AuthNativePassword, "": sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User=%? AND Host=%? AND Password = %? AND Password_timestamp >= %?;`, + sqlescape.MustFormatSQL(sql, `SELECT count(*) FROM %n.%n WHERE User=%? AND Host=%? AND Password = %? AND Password_timestamp >= %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), userDetail.pwd, beforeDate) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { @@ -1538,7 +1539,7 @@ func checkPasswordTimeRule(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, } case mysql.AuthCachingSha2Password, mysql.AuthTiDBSM3Password: sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT Password FROM %n.%n WHERE User=%? AND Host=%? AND Password_timestamp >= %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), beforeDate) + sqlescape.MustFormatSQL(sql, `SELECT Password FROM %n.%n WHERE User=%? AND Host=%? AND Password_timestamp >= %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, userDetail.user, strings.ToLower(userDetail.host), beforeDate) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -1797,7 +1798,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) if spec.AuthOpt.AuthPlugin != currentAuthPlugin { // delete password history from mysql.password_history. sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, spec.User.Hostname, spec.User.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? 
and User = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, spec.User.Hostname, spec.User.Username) if _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()); err != nil { failedUsers = append(failedUsers, spec.User.String()) needRollback = true @@ -1936,14 +1937,14 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) if len(fields) > 0 { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) for i, f := range fields { - sqlexec.MustFormatSQL(sql, f.expr, f.value) + sqlescape.MustFormatSQL(sql, f.expr, f.value) if i < len(fields)-1 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } } - sqlexec.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", spec.User.Hostname, spec.User.Username) + sqlescape.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", spec.User.Hostname, spec.User.Username) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { failedUsers = append(failedUsers, spec.User.String()) @@ -1962,7 +1963,7 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) if len(privData) > 0 { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "INSERT INTO %n.%n (Host, User, Priv) VALUES (%?,%?,%?) ON DUPLICATE KEY UPDATE Priv = values(Priv)", mysql.SystemDB, mysql.GlobalPrivTable, spec.User.Hostname, spec.User.Username, string(hack.String(privData))) + sqlescape.MustFormatSQL(sql, "INSERT INTO %n.%n (Host, User, Priv) VALUES (%?,%?,%?) ON DUPLICATE KEY UPDATE Priv = values(Priv)", mysql.SystemDB, mysql.GlobalPrivTable, spec.User.Hostname, spec.User.Username, string(hack.String(privData))) _, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { failedUsers = append(failedUsers, spec.User.String()) @@ -2046,7 +2047,7 @@ func (e *SimpleExec) executeGrantRole(ctx context.Context, s *ast.GrantRoleStmt) for _, user := range s.Users { for _, role := range s.Roles { sql.Reset() - sqlexec.MustFormatSQL(sql, `INSERT IGNORE INTO %n.%n (FROM_HOST, FROM_USER, TO_HOST, TO_USER) VALUES (%?,%?,%?,%?)`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `INSERT IGNORE INTO %n.%n (FROM_HOST, FROM_USER, TO_HOST, TO_USER) VALUES (%?,%?,%?,%?)`, mysql.SystemDB, mysql.RoleEdgeTable, role.Hostname, role.Username, user.Hostname, user.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { logutil.BgLogger().Error(fmt.Sprintf("Error occur when executing %s", sql)) if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { @@ -2180,7 +2181,7 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { func renameUserHostInSystemTable(sqlExecutor sqlexec.SQLExecutor, tableName, usernameColumn, hostColumn string, users *ast.UserToUser) error { ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? and %n = %?;`, + sqlescape.MustFormatSQL(sql, `UPDATE %n.%n SET %n = %?, %n = %? WHERE %n = %? 
and %n = %?;`, mysql.SystemDB, tableName, usernameColumn, users.NewUser.Username, hostColumn, strings.ToLower(users.NewUser.Hostname), usernameColumn, users.OldUser.Username, hostColumn, strings.ToLower(users.OldUser.Hostname)) @@ -2254,7 +2255,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // begin a transaction to delete a user. sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.UserTable, strings.ToLower(user.Hostname), user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.UserTable, strings.ToLower(user.Hostname), user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2262,7 +2263,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete password history from mysql.password_history. sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, strings.ToLower(user.Hostname), user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.PasswordHistoryTable, strings.ToLower(user.Hostname), user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2270,7 +2271,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.global_priv sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.GlobalPrivTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.GlobalPrivTable, user.Hostname, user.Username) if _, err := sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) if _, err := sqlExecutor.ExecuteInternal(internalCtx, "rollback"); err != nil { @@ -2281,7 +2282,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.db sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.DBTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.DBTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2289,7 +2290,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.tables_priv sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.TablePrivTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? 
and User = %?;`, mysql.SystemDB, mysql.TablePrivTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2297,7 +2298,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete privileges from mysql.columns_priv sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.ColumnPrivTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, mysql.ColumnPrivTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2305,14 +2306,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.role_edges sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE TO_HOST = %? and TO_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE TO_HOST = %? and TO_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE FROM_HOST = %? and FROM_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE FROM_HOST = %? and FROM_USER = %?;`, mysql.SystemDB, mysql.RoleEdgeTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2320,14 +2321,14 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.default_roles sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE DEFAULT_ROLE_HOST = %? and DEFAULT_ROLE_USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE DEFAULT_ROLE_HOST = %? and DEFAULT_ROLE_USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break } sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE HOST = %? and USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE HOST = %? and USER = %?;`, mysql.SystemDB, mysql.DefaultRoleTable, user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2335,7 +2336,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e // delete relationship from mysql.global_grants sql.Reset() - sqlexec.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? and User = %?;`, mysql.SystemDB, "global_grants", user.Hostname, user.Username) + sqlescape.MustFormatSQL(sql, `DELETE FROM %n.%n WHERE Host = %? 
and User = %?;`, mysql.SystemDB, "global_grants", user.Hostname, user.Username) if _, err = sqlExecutor.ExecuteInternal(internalCtx, sql.String()); err != nil { failedUsers = append(failedUsers, user.String()) break @@ -2387,7 +2388,7 @@ func userExists(ctx context.Context, sctx sessionctx.Context, name string, host // use the same internal executor to read within the same transaction, otherwise same as userExists func userExistsInternal(ctx context.Context, sqlExecutor sqlexec.SQLExecutor, name string, host string) (bool, error) { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `SELECT * FROM %n.%n WHERE User=%? AND Host=%? FOR UPDATE;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) + sqlescape.MustFormatSQL(sql, `SELECT * FROM %n.%n WHERE User=%? AND Host=%? FOR UPDATE;`, mysql.SystemDB, mysql.UserTable, name, strings.ToLower(host)) recordSet, err := sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return false, err @@ -2503,7 +2504,7 @@ func (e *SimpleExec) executeSetPwd(ctx context.Context, s *ast.SetPwdStmt) error } // update mysql.user sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, `UPDATE %n.%n SET authentication_string=%?,password_expired='N',password_last_changed=current_timestamp() WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, pwd, u, strings.ToLower(h)) + sqlescape.MustFormatSQL(sql, `UPDATE %n.%n SET authentication_string=%?,password_expired='N',password_last_changed=current_timestamp() WHERE User=%? AND Host=%?;`, mysql.SystemDB, mysql.UserTable, pwd, u, strings.ToLower(h)) _, err = sqlExecutor.ExecuteInternal(ctx, sql.String()) if err != nil { return err diff --git a/pkg/executor/simple_test.go b/pkg/executor/simple_test.go deleted file mode 100644 index 39460920a616d..0000000000000 --- a/pkg/executor/simple_test.go +++ /dev/null @@ -1,163 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "context" - "fmt" - "strconv" - "testing" - - "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/auth" - "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/server" - "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/globalconn" - "github.com/stretchr/testify/require" -) - -func TestKillStmt(t *testing.T) { - store, dom := testkit.CreateMockStoreAndDomain(t) - sv := server.CreateMockServer(t, store) - sv.SetDomain(dom) - defer sv.Close() - - conn1 := server.CreateMockConn(t, sv) - tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) - - originCfg := config.GetGlobalConfig() - newCfg := *originCfg - newCfg.EnableGlobalKill = false - config.StoreGlobalConfig(&newCfg) - defer func() { - config.StoreGlobalConfig(originCfg) - }() - - connID := conn1.ID() - - tk.MustExec("use test") - tk.MustExec(fmt.Sprintf("kill %d", connID)) - result := tk.MustQuery("show warnings") - result.Check(testkit.Rows("Warning 1105 Invalid operation. 
Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead")) - - newCfg2 := *originCfg - newCfg2.EnableGlobalKill = true - config.StoreGlobalConfig(&newCfg2) - - // ZERO serverID, treated as truncated. - tk.MustExec("kill 1") - result = tk.MustQuery("show warnings") - result.Check(testkit.Rows("Warning 1105 Kill failed: Received a 32bits truncated ConnectionID, expect 64bits. Please execute 'KILL [CONNECTION | QUERY] ConnectionID' to send a Kill without truncating ConnectionID.")) - - // truncated - tk.MustExec("kill 101") - result = tk.MustQuery("show warnings") - result.Check(testkit.Rows("Warning 1105 Kill failed: Received a 32bits truncated ConnectionID, expect 64bits. Please execute 'KILL [CONNECTION | QUERY] ConnectionID' to send a Kill without truncating ConnectionID.")) - - // excceed int64 - tk.MustExec("kill 9223372036854775808") // 9223372036854775808 == 2^63 - result = tk.MustQuery("show warnings") - result.Check(testkit.Rows("Warning 1105 Parse ConnectionID failed: unexpected connectionID exceeds int64")) - - // local kill - connIDAllocator := globalconn.NewGlobalAllocator(dom.ServerID, false) - killConnID := connIDAllocator.NextID() - tk.MustExec("kill " + strconv.FormatUint(killConnID, 10)) - result = tk.MustQuery("show warnings") - result.Check(testkit.Rows()) - - tk.MustExecToErr("kill rand()", "Invalid operation. Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead") - // remote kill is tested in `tests/globalkilltest` -} - -func TestUserAttributes(t *testing.T) { - store := testkit.CreateMockStore(t) - rootTK := testkit.NewTestKit(t, store) - ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) - - // https://dev.mysql.com/doc/refman/8.0/en/create-user.html#create-user-comments-attributes - rootTK.MustExec(`CREATE USER testuser COMMENT '1234'`) - rootTK.MustExec(`CREATE USER testuser1 ATTRIBUTE '{"name": "Tom", "age": 19}'`) - _, err := rootTK.Exec(`CREATE USER testuser2 ATTRIBUTE '{"name": "Tom", age: 19}'`) - rootTK.MustExec(`CREATE USER testuser2`) - require.Error(t, err) - rootTK.MustQuery(`SELECT user_attributes FROM mysql.user WHERE user = 'testuser'`).Check(testkit.Rows(`{"metadata": {"comment": "1234"}}`)) - rootTK.MustQuery(`SELECT user_attributes FROM mysql.user WHERE user = 'testuser1'`).Check(testkit.Rows(`{"metadata": {"age": 19, "name": "Tom"}}`)) - rootTK.MustQuery(`SELECT user_attributes FROM mysql.user WHERE user = 'testuser2'`).Check(testkit.Rows(`{}`)) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser'`).Check(testkit.Rows(`{"comment": "1234"}`)) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 19, "name": "Tom"}`)) - rootTK.MustQueryWithContext(ctx, `SELECT attribute->>"$.age" AS age, attribute->>"$.name" AS name FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`19 Tom`)) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser2'`).Check(testkit.Rows(``)) - - // https://dev.mysql.com/doc/refman/8.0/en/alter-user.html#alter-user-comments-attributes - rootTK.MustExec(`ALTER USER testuser1 ATTRIBUTE '{"age": 20, "sex": "male"}'`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "name": "Tom", "sex": "male"}`)) - 
rootTK.MustExec(`ALTER USER testuser1 ATTRIBUTE '{"hobby": "soccer"}'`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "hobby": "soccer", "name": "Tom", "sex": "male"}`)) - rootTK.MustExec(`ALTER USER testuser1 ATTRIBUTE '{"sex": null, "hobby": null}'`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "name": "Tom"}`)) - rootTK.MustExec(`ALTER USER testuser1 COMMENT '5678'`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "comment": "5678", "name": "Tom"}`)) - rootTK.MustExec(`ALTER USER testuser1 COMMENT ''`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "comment": "", "name": "Tom"}`)) - rootTK.MustExec(`ALTER USER testuser1 ATTRIBUTE '{"comment": null}'`) - rootTK.MustQueryWithContext(ctx, `SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'`).Check(testkit.Rows(`{"age": 20, "name": "Tom"}`)) - - // Non-root users could access COMMENT or ATTRIBUTE of all users via the view, - // but not via the mysql.user table. - tk := testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "testuser1"}, nil, nil, nil)) - tk.MustQueryWithContext(ctx, `SELECT user, host, attribute FROM information_schema.user_attributes ORDER BY user`).Check( - testkit.Rows("root % ", "testuser % {\"comment\": \"1234\"}", "testuser1 % {\"age\": 20, \"name\": \"Tom\"}", "testuser2 % ")) - tk.MustGetErrCode(`SELECT user, host, user_attributes FROM mysql.user ORDER BY user`, mysql.ErrTableaccessDenied) - - // https://github.com/pingcap/tidb/issues/39207 - rootTK.MustExec("create user usr1@'%' identified by 'passord'") - rootTK.MustExec("alter user usr1 comment 'comment1'") - rootTK.MustQuery("select user_attributes from mysql.user where user = 'usr1'").Check(testkit.Rows(`{"metadata": {"comment": "comment1"}}`)) - rootTK.MustExec("set global tidb_enable_resource_control = 'on'") - rootTK.MustExec("CREATE RESOURCE GROUP rg1 ru_per_sec = 100") - rootTK.MustExec("alter user usr1 resource group rg1") - rootTK.MustQuery("select user_attributes from mysql.user where user = 'usr1'").Check(testkit.Rows(`{"metadata": {"comment": "comment1"}, "resource_group": "rg1"}`)) -} - -func TestSetResourceGroup(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET GLOBAL tidb_enable_resource_control='on'") - - tk.MustContainErrMsg("SET RESOURCE GROUP rg1", "Unknown resource group 'rg1'") - - tk.MustExec("CREATE RESOURCE GROUP rg1 ru_per_sec = 100") - tk.MustExec("ALTER USER `root` RESOURCE GROUP `rg1`") - tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("default")) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("rg1")) - - tk.MustExec("CREATE RESOURCE GROUP rg2 ru_per_sec = 200") - tk.MustExec("SET RESOURCE GROUP `rg2`") - tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("rg2")) - tk.MustExec("SET RESOURCE GROUP ``") - tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("default")) - tk.MustExec("SET RESOURCE GROUP default") - 
tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("default")) - - tk.RefreshSession() - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustQuery("SELECT CURRENT_RESOURCE_GROUP()").Check(testkit.Rows("rg1")) -} diff --git a/pkg/executor/slow_query.go b/pkg/executor/slow_query.go index 7034d4511feef..43416a23cea8d 100644 --- a/pkg/executor/slow_query.go +++ b/pkg/executor/slow_query.go @@ -538,7 +538,7 @@ func (e *slowQueryRetriever) parseLog(ctx context.Context, sctx sessionctx.Conte defer e.memConsume(-logSize) defer func() { if r := recover(); r != nil { - err = fmt.Errorf("%s", r) + err = util.GetRecoverError(r) buf := make([]byte, 4096) stackSize := runtime.Stack(buf, false) buf = buf[:stackSize] diff --git a/pkg/executor/sort.go b/pkg/executor/sort.go index d51d96b152a59..31652778001c3 100644 --- a/pkg/executor/sort.go +++ b/pkg/executor/sort.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/sqlkiller" ) // SortExec represents sorting executor. @@ -232,7 +233,7 @@ func (e *SortExec) fetchRowChunks(ctx context.Context) error { failpoint.Inject("SignalCheckpointForSort", func(val failpoint.Value) { if val.(bool) { if e.Ctx().GetSessionVars().ConnectionID == 123456 { - e.Ctx().GetSessionVars().MemTracker.NeedKill.Store(true) + e.Ctx().GetSessionVars().MemTracker.Killer.SendKillSignal(sqlkiller.QueryMemoryExceeded) } } }) diff --git a/pkg/executor/stale_txn_test.go b/pkg/executor/stale_txn_test.go index ca743c9bd55a0..4b30125453be9 100644 --- a/pkg/executor/stale_txn_test.go +++ b/pkg/executor/stale_txn_test.go @@ -1315,23 +1315,6 @@ func TestPlanCacheWithStaleReadByBinaryProto(t *testing.T) { tk.ResultSetToResult(rs, fmt.Sprintf("%v", rs)).Check(testkit.Rows("1 10")) } -func TestIssue30872(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_txn_mode='pessimistic'") - tk.MustExec("set tx_isolation = 'READ-COMMITTED'") - tk.MustExec("create table t1 (id int primary key, v int)") - tk.MustExec("insert into t1 values(1, 10)") - time.Sleep(time.Millisecond * 100) - tk.MustExec("set @a=now(6)") - time.Sleep(time.Millisecond * 100) - tk.MustExec("update t1 set v=100 where id=1") - tk.MustExec("set autocommit=0") - tk.MustQuery("select * from t1 as of timestamp @a").Check(testkit.Rows("1 10")) -} - func TestIssue33728(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1347,33 +1330,6 @@ func TestIssue33728(t *testing.T) { require.Equal(t, "[planner:8135]invalid as of timestamp: as of timestamp cannot be NULL", err.Error()) } -func TestIssue31954(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t1 (id int primary key, v int)") - tk.MustExec("insert into t1 values(1, 10)") - time.Sleep(time.Millisecond * 100) - tk.MustExec("set @a=now(6)") - time.Sleep(time.Millisecond * 100) - tk.MustExec("update t1 set v=100 where id=1") - - tk.MustQuery("select * from t1 as of timestamp @a where v=(select v from t1 as of timestamp @a where id=1)"). - Check(testkit.Rows("1 10")) - - tk.MustQuery("select (select v from t1 as of timestamp @a where id=1) as v"). 
- Check(testkit.Rows("10")) -} - -func TestIssue35686(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - // This query should not panic - tk.MustQuery("select * from information_schema.ddl_jobs as of timestamp now()") -} - func TestStalePrepare(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) diff --git a/pkg/executor/statement_context_test.go b/pkg/executor/statement_context_test.go deleted file mode 100644 index 39400a3cff9d0..0000000000000 --- a/pkg/executor/statement_context_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2016 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package executor_test - -import ( - "fmt" - "testing" - "unicode/utf8" - - "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/errno" - "github.com/pingcap/tidb/pkg/testkit" - "github.com/stretchr/testify/require" -) - -const ( - strictModeSQL = "set sql_mode = 'STRICT_TRANS_TABLES'" - nonStrictModeSQL = "set sql_mode = ''" -) - -func TestStatementContext(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table sc (a int)") - tk.MustExec("insert sc values (1), (2)") - - tk.MustExec(strictModeSQL) - tk.MustQuery("select * from sc where a > cast(1.1 as decimal)").Check(testkit.Rows("2")) - tk.MustExec("update sc set a = 4 where a > cast(1.1 as decimal)") - - tk.MustExec(nonStrictModeSQL) - tk.MustExec("update sc set a = 3 where a > cast(1.1 as decimal)") - tk.MustQuery("select * from sc").Check(testkit.Rows("1", "3")) - - tk.MustExec(strictModeSQL) - tk.MustExec("delete from sc") - tk.MustExec("insert sc values ('1.8'+1)") - tk.MustQuery("select * from sc").Check(testkit.Rows("3")) - - // Handle coprocessor flags, '1x' is an invalid int. - // UPDATE and DELETE do select request first which is handled by coprocessor. - // In strict mode we expect error. - tk.MustExecToErr("update sc set a = 4 where a > '1x'") - tk.MustExecToErr("delete from sc where a < '1x'") - tk.MustQuery("select * from sc where a > '1x'").Check(testkit.Rows("3")) - - // Non-strict mode never returns error. 
- tk.MustExec(nonStrictModeSQL) - tk.MustExec("update sc set a = 4 where a > '1x'") - tk.MustExec("delete from sc where a < '1x'") - tk.MustQuery("select * from sc where a > '1x'").Check(testkit.Rows("4")) - - // Test invalid UTF8 - tk.MustExec("create table sc2 (a varchar(255))") - // Insert an invalid UTF8 - tk.MustExec("insert sc2 values (unhex('4040ffff'))") - require.Greater(t, tk.Session().GetSessionVars().StmtCtx.WarningCount(), uint16(0)) - tk.MustQuery("select * from sc2").Check(testkit.Rows("@@")) - tk.MustExec(strictModeSQL) - tk.MustGetErrCode("insert sc2 values (unhex('4040ffff'))", errno.ErrTruncatedWrongValueForField) - - tk.MustExec("set @@tidb_skip_utf8_check = '1'") - tk.MustExec("insert sc2 values (unhex('4040ffff'))") - tk.MustQuery("select length(a) from sc2").Check(testkit.Rows("2", "4")) - - tk.MustExec("set @@tidb_skip_utf8_check = '0'") - runeErrStr := string(utf8.RuneError) - tk.MustExec(fmt.Sprintf("insert sc2 values ('%s')", runeErrStr)) - - // Test invalid ASCII - tk.MustExec("create table sc3 (a varchar(255)) charset ascii") - - tk.MustExec(nonStrictModeSQL) - tk.MustExec("insert sc3 values (unhex('4040ffff'))") - require.Greater(t, tk.Session().GetSessionVars().StmtCtx.WarningCount(), uint16(0)) - tk.MustQuery("select * from sc3").Check(testkit.Rows("@@")) - - tk.MustExec(strictModeSQL) - tk.MustGetErrCode("insert sc3 values (unhex('4040ffff'))", errno.ErrTruncatedWrongValueForField) - - tk.MustExec("set @@tidb_skip_ascii_check = '1'") - tk.MustExec("insert sc3 values (unhex('4040ffff'))") - tk.MustQuery("select length(a) from sc3").Check(testkit.Rows("2", "4")) - - // no placeholder in ASCII, so just insert '@@'... - tk.MustExec("set @@tidb_skip_ascii_check = '0'") - tk.MustExec("insert sc3 values (unhex('4040'))") - - // Test non-BMP characters. 
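A note on the pkg/executor/slow_query.go hunk above: in the parseLog recover path, `fmt.Errorf("%s", r)` flattened the recovered panic value into a plain string, which drops the concrete error type and breaks `errors.Is`/`errors.As` matching further up the stack. `util.GetRecoverError(r)` keeps the typed error. A hedged sketch of the helper's assumed contract, inferred from the call site rather than copied from pkg/util:

```go
package util

import "fmt"

// GetRecoverError returns the recovered value unchanged when it already is
// an error, and only falls back to string formatting for other panic values.
// Assumed behavior, inferred from how the call site in this diff uses it.
func GetRecoverError(r interface{}) error {
	if err, ok := r.(error); ok {
		return err
	}
	return fmt.Errorf("%v", r)
}
```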
- tk.MustExec(nonStrictModeSQL) - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(a varchar(100) charset utf8);") - defer tk.MustExec("drop table if exists t1") - tk.MustExec("insert t1 values (unhex('f09f8c80'))") - require.Greater(t, tk.Session().GetSessionVars().StmtCtx.WarningCount(), uint16(0)) - tk.MustQuery("select * from t1").Check(testkit.Rows("")) - tk.MustExec("insert t1 values (unhex('4040f09f8c80'))") - require.Greater(t, tk.Session().GetSessionVars().StmtCtx.WarningCount(), uint16(0)) - tk.MustQuery("select * from t1").Check(testkit.Rows("", "@@")) - tk.MustQuery("select length(a) from t1").Check(testkit.Rows("0", "2")) - tk.MustExec(strictModeSQL) - tk.MustGetErrCode("insert t1 values (unhex('f09f8c80'))", errno.ErrTruncatedWrongValueForField) - tk.MustGetErrCode("insert t1 values (unhex('F0A48BAE'))", errno.ErrTruncatedWrongValueForField) - config.UpdateGlobal(func(conf *config.Config) { - conf.Instance.CheckMb4ValueInUTF8.Store(false) - }) - tk.MustExec("insert t1 values (unhex('f09f8c80'))") - config.UpdateGlobal(func(conf *config.Config) { - conf.Instance.CheckMb4ValueInUTF8.Store(true) - }) - tk.MustExecToErr("insert t1 values (unhex('F0A48BAE'))") -} diff --git a/pkg/executor/test/admintest/BUILD.bazel b/pkg/executor/test/admintest/BUILD.bazel index 7d217e0fa3d1d..75376d7081a9a 100644 --- a/pkg/executor/test/admintest/BUILD.bazel +++ b/pkg/executor/test/admintest/BUILD.bazel @@ -8,7 +8,7 @@ go_test( "main_test.go", ], flaky = True, - shard_count = 21, + shard_count = 18, deps = [ "//pkg/config", "//pkg/domain", diff --git a/pkg/executor/test/admintest/admin_test.go b/pkg/executor/test/admintest/admin_test.go index 63c7bb4372965..eb2d455b8aa1b 100644 --- a/pkg/executor/test/admintest/admin_test.go +++ b/pkg/executor/test/admintest/admin_test.go @@ -45,53 +45,6 @@ import ( "go.uber.org/zap" ) -func TestAdminCheckIndexRange(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists check_index_test;`) - tk.MustExec(`create table check_index_test (a int, b varchar(10), index a_b (a, b), index b (b))`) - tk.MustExec(`insert check_index_test values (3, "ab"),(2, "cd"),(1, "ef"),(-1, "hi")`) - result := tk.MustQuery("admin check index check_index_test a_b (2, 4);") - result.Check(testkit.Rows("1 ef 3", "2 cd 2")) - - result = tk.MustQuery("admin check index check_index_test a_b (3, 5);") - result.Check(testkit.Rows("-1 hi 4", "1 ef 3")) - - tk.MustExec("use mysql") - result = tk.MustQuery("admin check index test.check_index_test a_b (2, 3), (4, 5);") - result.Check(testkit.Rows("-1 hi 4", "2 cd 2")) -} - -func TestAdminCheckIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - check := func() { - tk.MustExec("insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL)") - tk.MustExec("admin check index admin_test c1") - tk.MustExec("admin check index admin_test c2") - } - tk.MustExec("drop table if exists admin_test") - tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2))") - check() - - // Test for hash partition table. - tk.MustExec("drop table if exists admin_test") - tk.MustExec("create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) partition by hash(c2) partitions 5;") - check() - - // Test for range partition table. 
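A note on the pkg/executor/sort.go hunk above: the failpoint used to flip the boolean `MemTracker.NeedKill`; it now calls `MemTracker.Killer.SendKillSignal(sqlkiller.QueryMemoryExceeded)`, so the kill carries a reason instead of a bare flag. A hypothetical mock of that shape (the real `pkg/util/sqlkiller` types are not shown in this diff; the names and reason code below are illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

type killReason uint32

const queryMemoryExceeded killReason = 1 // illustrative reason code

type sqlKiller struct{ reason atomic.Uint32 }

// sendKillSignal records why the query must stop; storing a reason rather
// than a bool lets the executor surface a specific error to the client.
func (k *sqlKiller) sendKillSignal(r killReason) { k.reason.Store(uint32(r)) }

// handleSignal is polled on the execution path and maps the reason to an error.
func (k *sqlKiller) handleSignal() error {
	if killReason(k.reason.Load()) == queryMemoryExceeded {
		return errors.New("query killed: memory quota exceeded")
	}
	return nil
}

func main() {
	var k sqlKiller
	k.sendKillSignal(queryMemoryExceeded)
	fmt.Println(k.handleSignal())
}
```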
- tk.MustExec("drop table if exists admin_test") - tk.MustExec(`create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) PARTITION BY RANGE ( c2 ) ( - PARTITION p0 VALUES LESS THAN (5), - PARTITION p1 VALUES LESS THAN (10), - PARTITION p2 VALUES LESS THAN (MAXVALUE))`) - check() -} - func TestAdminRecoverIndex(t *testing.T) { store, domain := testkit.CreateMockStoreAndDomain(t) @@ -345,7 +298,7 @@ func TestClusteredIndexAdminRecoverIndex(t *testing.T) { tblName := model.NewCIStr("t") // Test no corruption case. - tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key(a, c), index idx(b));") + tk.MustExec("create table t (a varchar(255), b int, c char(10), primary key(a, c), index idx(b), index idx1(c));") tk.MustExec("insert into t values ('1', 2, '3'), ('1', 2, '4'), ('1', 2, '5');") tk.MustQuery("admin recover index t `primary`;").Check(testkit.Rows("0 0")) tk.MustQuery("admin recover index t `idx`;").Check(testkit.Rows("0 3")) @@ -362,6 +315,7 @@ func TestClusteredIndexAdminRecoverIndex(t *testing.T) { sc := ctx.GetSessionVars().StmtCtx // Some index entries are missed. + // Recover an index don't covered by clustered index. txn, err := store.Begin() require.NoError(t, err) cHandle := testutil.MustNewCommonHandle(t, "1", "3") @@ -376,6 +330,23 @@ func TestClusteredIndexAdminRecoverIndex(t *testing.T) { tk.MustQuery("admin recover index t idx").Check(testkit.Rows("1 3")) tk.MustQuery("SELECT COUNT(*) FROM t USE INDEX(idx)").Check(testkit.Rows("3")) tk.MustExec("admin check table t;") + + // Recover an index covered by clustered index. + idx1Info := tblInfo.FindIndexByName("idx1") + indexOpr1 := tables.NewIndex(tblInfo.ID, tblInfo, idx1Info) + txn, err = store.Begin() + require.NoError(t, err) + err = indexOpr1.Delete(sc, txn, types.MakeDatums("3"), cHandle) + require.NoError(t, err) + err = txn.Commit(context.Background()) + require.NoError(t, err) + tk.MustGetErrCode("admin check table t", mysql.ErrDataInconsistent) + tk.MustGetErrCode("admin check index t idx1", mysql.ErrDataInconsistent) + + tk.MustQuery("SELECT COUNT(*) FROM t USE INDEX(idx1)").Check(testkit.Rows("2")) + tk.MustQuery("admin recover index t idx1").Check(testkit.Rows("1 3")) + tk.MustQuery("SELECT COUNT(*) FROM t USE INDEX(idx1)").Check(testkit.Rows("3")) + tk.MustExec("admin check table t;") } func TestAdminRecoverPartitionTableIndex(t *testing.T) { @@ -1770,16 +1741,3 @@ func TestAdminCheckTableErrorLocateForClusterIndex(t *testing.T) { tk.MustExec("admin check table admin_test") } } - -func TestAdminCheckTableErrorLocateBigTable(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists admin_test") - tk.MustExec("create table admin_test (c1 int, c2 int, primary key(c1), key(c2))") - tk.MustExec("set cte_max_recursion_depth=100000;") - tk.MustExec("insert into admin_test with recursive cte(a, b) as (select 1, 1 union select a+1, b+1 from cte where cte.a< 100000) select * from cte;") - tk.MustQuery("select /*+ read_from_storage(tikv[`test`.`admin_test`]) */ bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024), count(*) from `test`.`admin_test` use index() where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024)") - tk.MustQuery("select bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 
9223372036854775807) div 1 % 1024), count(*) from `test`.`admin_test` use index(`c2`) where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024)") -} diff --git a/pkg/executor/test/aggregate/BUILD.bazel b/pkg/executor/test/aggregate/BUILD.bazel index 06b9638828051..575f1ccf534b3 100644 --- a/pkg/executor/test/aggregate/BUILD.bazel +++ b/pkg/executor/test/aggregate/BUILD.bazel @@ -9,14 +9,12 @@ go_test( ], data = glob(["testdata/**"]), flaky = True, - shard_count = 9, + shard_count = 5, deps = [ "//pkg/config", "//pkg/executor/aggregate", - "//pkg/executor/internal", "//pkg/session", "//pkg/testkit", - "//pkg/testkit/testdata", "//pkg/testkit/testsetup", "//pkg/util/sqlexec", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/executor/test/aggregate/aggregate_test.go b/pkg/executor/test/aggregate/aggregate_test.go index 4e793a2d283bd..83fcbdd73d510 100644 --- a/pkg/executor/test/aggregate/aggregate_test.go +++ b/pkg/executor/test/aggregate/aggregate_test.go @@ -27,220 +27,12 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/executor/aggregate" - "github.com/pingcap/tidb/pkg/executor/internal" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/stretchr/testify/require" ) -func TestGroupConcatAggr(t *testing.T) { - var err error - // issue #5411 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists test;") - tk.MustExec("create table test(id int, name int)") - tk.MustExec("insert into test values(1, 10);") - tk.MustExec("insert into test values(1, 20);") - tk.MustExec("insert into test values(1, 30);") - tk.MustExec("insert into test values(2, 20);") - tk.MustExec("insert into test values(3, 200);") - tk.MustExec("insert into test values(3, 500);") - result := tk.MustQuery("select id, group_concat(name) from test group by id order by id") - result.Check(testkit.Rows("1 10,20,30", "2 20", "3 200,500")) - - result = tk.MustQuery("select id, group_concat(name SEPARATOR ';') from test group by id order by id") - result.Check(testkit.Rows("1 10;20;30", "2 20", "3 200;500")) - - result = tk.MustQuery("select id, group_concat(name SEPARATOR ',') from test group by id order by id") - result.Check(testkit.Rows("1 10,20,30", "2 20", "3 200,500")) - - result = tk.MustQuery(`select id, group_concat(name SEPARATOR '%') from test group by id order by id`) - result.Check(testkit.Rows("1 10%20%30", "2 20", `3 200%500`)) - - result = tk.MustQuery("select id, group_concat(name SEPARATOR '') from test group by id order by id") - result.Check(testkit.Rows("1 102030", "2 20", "3 200500")) - - result = tk.MustQuery("select id, group_concat(name SEPARATOR '123') from test group by id order by id") - result.Check(testkit.Rows("1 101232012330", "2 20", "3 200123500")) - - tk.MustQuery("select group_concat(id ORDER BY name) from (select * from test order by id, name limit 2,2) t").Check(testkit.Rows("2,1")) - tk.MustQuery("select group_concat(id ORDER BY name desc) from (select * from test order by id, name limit 2,2) t").Check(testkit.Rows("1,2")) - tk.MustQuery("select group_concat(name ORDER BY id) from (select * from test order by id, name limit 2,2) t").Check(testkit.Rows("30,20")) - tk.MustQuery("select group_concat(name ORDER BY id desc) from (select * from test order by id, name limit 2,2) 
t").Check(testkit.Rows("20,30")) - - result = tk.MustQuery("select group_concat(name ORDER BY name desc SEPARATOR '++') from test;") - result.Check(testkit.Rows("500++200++30++20++20++10")) - - result = tk.MustQuery("select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test;") - result.Check(testkit.Rows("3--3--1--1--2--1")) - - result = tk.MustQuery("select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test;") - result.Check(testkit.Rows("500++200++30++20++20++10 3--3--1--1--2--1")) - - result = tk.MustQuery("select group_concat(distinct name order by name desc) from test;") - result.Check(testkit.Rows("500,200,30,20,10")) - - expected := "3--3--1--1--2--1" - for maxLen := 4; maxLen < len(expected); maxLen++ { - tk.MustExec(fmt.Sprintf("set session group_concat_max_len=%v", maxLen)) - result = tk.MustQuery("select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test;") - result.Check(testkit.Rows(expected[:maxLen])) - require.Len(t, tk.Session().GetSessionVars().StmtCtx.GetWarnings(), 1) - } - expected = "1--2--1--1--3--3" - for maxLen := 4; maxLen < len(expected); maxLen++ { - tk.MustExec(fmt.Sprintf("set session group_concat_max_len=%v", maxLen)) - result = tk.MustQuery("select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test;") - result.Check(testkit.Rows(expected[:maxLen])) - require.Len(t, tk.Session().GetSessionVars().StmtCtx.GetWarnings(), 1) - } - expected = "500,200,30,20,10" - for maxLen := 4; maxLen < len(expected); maxLen++ { - tk.MustExec(fmt.Sprintf("set session group_concat_max_len=%v", maxLen)) - result = tk.MustQuery("select group_concat(distinct name order by name desc) from test;") - result.Check(testkit.Rows(expected[:maxLen])) - require.Len(t, tk.Session().GetSessionVars().StmtCtx.GetWarnings(), 1) - } - - tk.MustExec(fmt.Sprintf("set session group_concat_max_len=%v", 1024)) - - // test varchar table - tk.MustExec("drop table if exists test2;") - tk.MustExec("create table test2(id varchar(20), name varchar(20));") - tk.MustExec("insert into test2 select * from test;") - - tk.MustQuery("select group_concat(id ORDER BY name) from (select * from test2 order by id, name limit 2,2) t").Check(testkit.Rows("2,1")) - tk.MustQuery("select group_concat(id ORDER BY name desc) from (select * from test2 order by id, name limit 2,2) t").Check(testkit.Rows("1,2")) - tk.MustQuery("select group_concat(name ORDER BY id) from (select * from test2 order by id, name limit 2,2) t").Check(testkit.Rows("30,20")) - tk.MustQuery("select group_concat(name ORDER BY id desc) from (select * from test2 order by id, name limit 2,2) t").Check(testkit.Rows("20,30")) - - result = tk.MustQuery("select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test2;") - result.Check(testkit.Rows("500++30++200++20++20++10 3--1--3--1--2--1")) - - // test Position Expr - tk.MustQuery("select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 1 desc, id SEPARATOR '++') from test;").Check(testkit.Rows("1 2 3 4 5 5003++2003++301++201++202++101")) - tk.MustQuery("select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 2 desc, name SEPARATOR '++') from test;").Check(testkit.Rows("1 2 3 4 5 2003++5003++202++101++201++301")) - err = tk.ExecToErr("select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 3 desc, name SEPARATOR '++') from test;") - require.EqualError(t, err, "[planner:1054]Unknown column '3' in 'order clause'") - // test Param 
Marker - tk.MustExec(`prepare s1 from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY floor(id/?) desc, name SEPARATOR '++') from test";`) - tk.MustExec("set @a=2;") - tk.MustQuery("execute s1 using @a;").Check(testkit.Rows("1 2 3 4 5 202++2003++5003++101++201++301")) - - tk.MustExec(`prepare s1 from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY ? desc, name SEPARATOR '++') from test";`) - tk.MustExec("set @a=2;") - tk.MustQuery("execute s1 using @a;").Check(testkit.Rows("1 2 3 4 5 2003++5003++202++101++201++301")) - tk.MustExec("set @a=3;") - err = tk.ExecToErr("execute s1 using @a;") - require.EqualError(t, err, "[planner:1054]Unknown column '?' in 'order clause'") - tk.MustExec("set @a=3.0;") - tk.MustQuery("execute s1 using @a;").Check(testkit.Rows("1 2 3 4 5 101++202++201++301++2003++5003")) - - // test partition table - tk.MustExec("drop table if exists ptest;") - tk.MustExec("CREATE TABLE ptest (id int,name int) PARTITION BY RANGE ( id ) " + - "(PARTITION `p0` VALUES LESS THAN (2), PARTITION `p1` VALUES LESS THAN (11))") - tk.MustExec("insert into ptest select * from test;") - - for i := 0; i <= 1; i++ { - for j := 0; j <= 1; j++ { - tk.MustExec(fmt.Sprintf("set session tidb_opt_distinct_agg_push_down = %v", i)) - tk.MustExec(fmt.Sprintf("set session tidb_opt_agg_push_down = %v", j)) - - result = tk.MustQuery("select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest;") - result.Check(testkit.Rows("500++200++30++20++20++10 3--3--1--1--2--1")) - - result = tk.MustQuery("select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest;") - result.Check(testkit.Rows("500,200,30,20,10")) - } - } - - // issue #9920 - tk.MustQuery("select group_concat(123, null)").Check(testkit.Rows("")) - - // issue #23129 - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(cid int, sname varchar(100));") - tk.MustExec("insert into t1 values(1, 'Bob'), (1, 'Alice');") - tk.MustExec("insert into t1 values(3, 'Ace');") - tk.MustExec("set @@group_concat_max_len=5;") - rows := tk.MustQuery("select group_concat(sname order by sname) from t1 group by cid;") - rows.Check(testkit.Rows("Alice", "Ace")) - - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c1 varchar(10));") - tk.MustExec("insert into t1 values('0123456789');") - tk.MustExec("insert into t1 values('12345');") - tk.MustExec("set @@group_concat_max_len=8;") - rows = tk.MustQuery("select group_concat(c1 order by c1) from t1 group by c1;") - rows.Check(testkit.Rows("01234567", "12345")) -} - -func TestSelectDistinct(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - internal.FillData(tk, "select_distinct_test") - - tk.MustExec("begin") - r := tk.MustQuery("select distinct name from select_distinct_test;") - r.Check(testkit.Rows("hello")) - tk.MustExec("commit") -} - -func TestInjectProjBelowTopN(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (i int);") - tk.MustExec("insert into t values (1), (1), (1),(2),(3),(2),(3),(2),(3);") - var ( - input []string - output [][]string - ) - aggMergeSuiteData.LoadTestCases(t, &input, &output) - for i, tt := range input { - testdata.OnRecord(func() { - output[i] = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) - }) - 
tk.MustQuery(tt).Check(testkit.Rows(output[i]...)) - } -} - -func TestIssue12759HashAggCalledByApply(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.Session().GetSessionVars().SetHashAggFinalConcurrency(4) - tk.MustExec(`insert into mysql.opt_rule_blacklist value("decorrelate");`) - defer func() { - tk.MustExec(`delete from mysql.opt_rule_blacklist where name = "decorrelate";`) - tk.MustExec(`admin reload opt_rule_blacklist;`) - }() - tk.MustExec(`drop table if exists test;`) - tk.MustExec("create table test (a int);") - tk.MustExec("insert into test value(1);") - tk.MustQuery("select /*+ hash_agg() */ sum(a), (select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt;").Check(testkit.Rows("1 ")) - - var ( - input []string - output [][]string - ) - aggMergeSuiteData.LoadTestCases(t, &input, &output) - for i, tt := range input { - testdata.OnRecord(func() { - output[i] = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) - }) - tk.MustQuery(tt).Check(testkit.Rows(output[i]...)) - } -} - func TestHashAggRuntimeStat(t *testing.T) { partialInfo := &aggregate.AggWorkerInfo{ Concurrency: 5, diff --git a/pkg/executor/test/aggregate/main_test.go b/pkg/executor/test/aggregate/main_test.go index 2b4aee21617d4..be61f9e975f64 100644 --- a/pkg/executor/test/aggregate/main_test.go +++ b/pkg/executor/test/aggregate/main_test.go @@ -18,18 +18,12 @@ import ( "testing" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/testkit/testsetup" "go.uber.org/goleak" ) -var aggMergeSuiteData testdata.TestData -var testDataMap = make(testdata.BookKeeper) - func TestMain(m *testing.M) { testsetup.SetupForCommonTest() - testDataMap.LoadTestSuiteData("testdata", "agg_suite") - aggMergeSuiteData = testDataMap["agg_suite"] config.UpdateGlobal(func(conf *config.Config) { conf.TiKVClient.AsyncCommit.SafeWindow = 0 conf.TiKVClient.AsyncCommit.AllowedClockDrift = 0 diff --git a/pkg/executor/test/aggregate/testdata/agg_suite_in.json b/pkg/executor/test/aggregate/testdata/agg_suite_in.json deleted file mode 100644 index d53b8e044c656..0000000000000 --- a/pkg/executor/test/aggregate/testdata/agg_suite_in.json +++ /dev/null @@ -1,19 +0,0 @@ -[ - { - "name": "TestInjectProjBelowTopN", - "cases": [ - "explain format = 'brief' select * from t order by i + 1", - "select * from t order by i + 1", - "explain format = 'brief' select * from t order by i + 1 limit 2", - "select * from t order by i + 1 limit 2", - "select i, i, i from t order by i + 1" - ] - }, - { - "name": "TestIssue12759HashAggCalledByApply", - "cases": [ - // make sure the plan is Apply -> Apply -> Apply -> HashAgg, and the count of Apply is equal to HashAggFinalConcurrency-1. 
- "explain format = 'brief' select /*+ hash_agg() */ sum(a), (select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt" - ] - } -] diff --git a/pkg/executor/test/aggregate/testdata/agg_suite_out.json b/pkg/executor/test/aggregate/testdata/agg_suite_out.json deleted file mode 100644 index eb8072a04f224..0000000000000 --- a/pkg/executor/test/aggregate/testdata/agg_suite_out.json +++ /dev/null @@ -1,81 +0,0 @@ -[ - { - "Name": "TestInjectProjBelowTopN", - "Cases": [ - [ - "Projection 10000.00 root test.t.i", - "└─Sort 10000.00 root Column#3", - " └─Projection 10000.00 root test.t.i, plus(test.t.i, 1)->Column#3", - " └─TableReader 10000.00 root data:TableFullScan", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - [ - "1", - "1", - "1", - "2", - "2", - "2", - "3", - "3", - "3" - ], - [ - "Projection 2.00 root test.t.i", - "└─TopN 2.00 root Column#3, offset:0, count:2", - " └─Projection 2.00 root test.t.i, plus(test.t.i, 1)->Column#3", - " └─TableReader 2.00 root data:TopN", - " └─TopN 2.00 cop[tikv] plus(test.t.i, 1), offset:0, count:2", - " └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo" - ], - [ - "1", - "1" - ], - [ - "1 1 1", - "1 1 1", - "1 1 1", - "2 2 2", - "2 2 2", - "2 2 2", - "3 3 3", - "3 3 3", - "3 3 3" - ] - ] - }, - { - "Name": "TestIssue12759HashAggCalledByApply", - "Cases": [ - [ - "Projection 1.00 root Column#9, Column#12, Column#15, Column#18", - "└─Apply 1.00 root CARTESIAN left outer join", - " ├─Apply(Build) 1.00 root CARTESIAN left outer join", - " │ ├─Apply(Build) 1.00 root CARTESIAN left outer join", - " │ │ ├─HashAgg(Build) 1.00 root funcs:sum(Column#28)->Column#9, funcs:firstrow(Column#29)->test.test.a", - " │ │ │ └─Projection 10000.00 root cast(test.test.a, decimal(10,0) BINARY)->Column#28, test.test.a->Column#29", - " │ │ │ └─TableReader 10000.00 root data:TableFullScan", - " │ │ │ └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo", - " │ │ └─Projection(Probe) 1.00 root ->Column#12", - " │ │ └─Limit 1.00 root offset:0, count:1", - " │ │ └─TableReader 1.00 root data:Limit", - " │ │ └─Limit 1.00 cop[tikv] offset:0, count:1", - " │ │ └─Selection 1.00 cop[tikv] eq(test.test.a, test.test.a)", - " │ │ └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo", - " │ └─Projection(Probe) 1.00 root ->Column#15", - " │ └─Limit 1.00 root offset:0, count:1", - " │ └─TableReader 1.00 root data:Limit", - " │ └─Limit 1.00 cop[tikv] offset:0, count:1", - " │ └─Selection 1.00 cop[tikv] eq(test.test.a, test.test.a)", - " │ └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo", - " └─Projection(Probe) 1.00 root ->Column#18", - " └─Limit 1.00 root offset:0, count:1", - " └─TableReader 1.00 root data:Limit", - " └─Limit 1.00 cop[tikv] offset:0, count:1", - " └─Selection 1.00 cop[tikv] eq(test.test.a, test.test.a)", - " └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo" - ] - ] - } -] diff --git a/pkg/executor/test/analyzetest/BUILD.bazel b/pkg/executor/test/analyzetest/BUILD.bazel index f3504f59f6e80..5f37de422e445 100644 --- a/pkg/executor/test/analyzetest/BUILD.bazel +++ b/pkg/executor/test/analyzetest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( "main_test.go", ], flaky = True, - shard_count = 50, + shard_count = 48, deps = [ "//pkg/config", "//pkg/domain", diff --git a/pkg/executor/test/analyzetest/analyze_test.go 
b/pkg/executor/test/analyzetest/analyze_test.go index 59de964d96ace..6fa02510e342b 100644 --- a/pkg/executor/test/analyzetest/analyze_test.go +++ b/pkg/executor/test/analyzetest/analyze_test.go @@ -122,38 +122,6 @@ func TestAnalyzeReplicaReadFollower(t *testing.T) { ctx.GetSessionVars().SetReplicaRead(kv.ReplicaReadFollower) tk.MustExec("analyze table t") } - -func TestClusterIndexAnalyze(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("drop database if exists test_cluster_index_analyze;") - tk.MustExec("create database test_cluster_index_analyze;") - tk.MustExec("use test_cluster_index_analyze;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("create table t (a int, b int, c int, primary key(a, b));") - for i := 0; i < 100; i++ { - tk.MustExec("insert into t values (?, ?, ?)", i, i, i) - } - tk.MustExec("analyze table t;") - tk.MustExec("drop table t;") - - tk.MustExec("create table t (a varchar(255), b int, c float, primary key(c, a));") - for i := 0; i < 100; i++ { - tk.MustExec("insert into t values (?, ?, ?)", strconv.Itoa(i), i, i) - } - tk.MustExec("analyze table t;") - tk.MustExec("drop table t;") - - tk.MustExec("create table t (a char(10), b decimal(5, 3), c int, primary key(a, c, b));") - for i := 0; i < 100; i++ { - tk.MustExec("insert into t values (?, ?, ?)", strconv.Itoa(i), i, i) - } - tk.MustExec("analyze table t;") - tk.MustExec("drop table t;") -} - func TestAnalyzeRestrict(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -246,28 +214,6 @@ func TestAnalyzeTooLongColumns(t *testing.T) { require.Equal(t, int64(65559), tbl.Columns[1].TotColSize) } -func TestAnlyzeIssue(t *testing.T) { - // Issue15993 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("set @@tidb_analyze_version = 1") - tk.MustExec("use test") - tk.MustExec("drop table if exists t0") - tk.MustExec("CREATE TABLE t0(c0 INT PRIMARY KEY);") - tk.MustExec("ANALYZE TABLE t0 INDEX PRIMARY;") - // Issue15751 - tk.MustExec("drop table if exists t0") - tk.MustExec("CREATE TABLE t0(c0 INT, c1 INT, PRIMARY KEY(c0, c1))") - tk.MustExec("INSERT INTO t0 VALUES (0, 0)") - tk.MustExec("ANALYZE TABLE t0") - // Issue15752 - tk.MustExec("drop table if exists t0") - tk.MustExec("CREATE TABLE t0(c0 INT)") - tk.MustExec("INSERT INTO t0 VALUES (0)") - tk.MustExec("CREATE INDEX i0 ON t0(c0)") - tk.MustExec("ANALYZE TABLE t0 INDEX i0") -} - func TestFailedAnalyzeRequest(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1517,34 +1463,6 @@ func TestAnalyzeColumnsWithDynamicPartitionTable(t *testing.T) { } } -func TestIssue34228(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`USE test`) - tk.MustExec(`DROP TABLE IF EXISTS Issue34228`) - tk.MustExec(`CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01"))`) - tk.MustExec(`INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02')`) - tk.MustExec(`SET @@global.tidb_analyze_version = 1`) - tk.MustExec(`SET @@session.tidb_partition_prune_mode = 'static'`) - tk.MustExec(`ANALYZE TABLE Issue34228`) - tk.MustExec(`SET @@session.tidb_partition_prune_mode = 'dynamic'`) - tk.MustExec(`ANALYZE TABLE Issue34228`) - tk.MustQuery(`SELECT * 
FROM Issue34228`).Sort().Check(testkit.Rows("1 2022-02-01 00:00:02", "2 2022-02-01 00:00:02")) - // Needs a second run to hit the issue - tk2 := testkit.NewTestKit(t, store) - tk2.MustExec(`USE test`) - tk2.MustExec(`DROP TABLE IF EXISTS Issue34228`) - tk2.MustExec(`CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01"))`) - tk2.MustExec(`INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02')`) - tk2.MustExec(`SET @@global.tidb_analyze_version = 1`) - tk2.MustExec(`SET @@session.tidb_partition_prune_mode = 'static'`) - tk2.MustExec(`ANALYZE TABLE Issue34228`) - tk2.MustExec(`SET @@session.tidb_partition_prune_mode = 'dynamic'`) - tk2.MustExec(`ANALYZE TABLE Issue34228`) - tk2.MustQuery(`SELECT * FROM Issue34228`).Sort().Check(testkit.Rows("1 2022-02-01 00:00:02", "2 2022-02-01 00:00:02")) -} - func TestAnalyzeColumnsWithStaticPartitionTable(t *testing.T) { for _, val := range []model.ColumnChoice{model.ColumnList, model.PredicateColumns} { func(choice model.ColumnChoice) { @@ -2037,11 +1955,10 @@ func testKillAutoAnalyze(t *testing.T, ver int) { } } -func TestKillAutoAnalyzeV1(t *testing.T) { +func TestKillAutoAnalyze(t *testing.T) { + // version 1 testKillAutoAnalyze(t, 1) -} - -func TestKillAutoAnalyzeV2(t *testing.T) { + // version 2 testKillAutoAnalyze(t, 2) } @@ -2874,19 +2791,6 @@ func TestAnalyzeColumnsSkipMVIndexJsonCol(t *testing.T) { require.False(t, stats.Indices[tblInfo.Indices[1].ID].IsStatsInitialized()) } -func TestManualAnalyzeSkipColumnTypes(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(a int, b int, c json, d text, e mediumtext, f blob, g mediumblob, index idx(d(10)))") - tk.MustExec("set @@session.tidb_analyze_skip_column_types = 'json,blob,mediumblob,text,mediumtext'") - tk.MustExec("analyze table t") - tk.MustQuery("select job_info from mysql.analyze_jobs where job_info like '%analyze table%'").Check(testkit.Rows("analyze table columns a, b, d with 256 buckets, 500 topn, 1 samplerate")) - tk.MustExec("delete from mysql.analyze_jobs") - tk.MustExec("analyze table t columns a, e") - tk.MustQuery("select job_info from mysql.analyze_jobs where job_info like '%analyze table%'").Check(testkit.Rows("analyze table columns a, d with 256 buckets, 500 topn, 1 samplerate")) -} - // TestAnalyzeMVIndex tests analyzing the mv index use some real data in the table. // It checks the analyze jobs, async loading and the stats content in the memory. 
func TestAnalyzeMVIndex(t *testing.T) { diff --git a/pkg/executor/test/analyzetest/memorycontrol/BUILD.bazel b/pkg/executor/test/analyzetest/memorycontrol/BUILD.bazel index f06e27b983375..a15332cc6ef7b 100644 --- a/pkg/executor/test/analyzetest/memorycontrol/BUILD.bazel +++ b/pkg/executor/test/analyzetest/memorycontrol/BUILD.bazel @@ -16,7 +16,6 @@ go_test( "//pkg/statistics/handle/autoanalyze", "//pkg/testkit", "//pkg/util", - "//pkg/util/memory", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", diff --git a/pkg/executor/test/analyzetest/memorycontrol/memory_control_test.go b/pkg/executor/test/analyzetest/memorycontrol/memory_control_test.go index 59ad3717ef8e9..ed1d427609114 100644 --- a/pkg/executor/test/analyzetest/memorycontrol/memory_control_test.go +++ b/pkg/executor/test/analyzetest/memorycontrol/memory_control_test.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb/pkg/statistics/handle/autoanalyze" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/memory" "github.com/stretchr/testify/require" ) @@ -53,7 +52,7 @@ func TestGlobalMemoryControlForAnalyze(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/util/memory/ReadMemStats", `return(536870912)`)) require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/executor/mockAnalyzeMergeWorkerSlowConsume", `return(100)`)) _, err := tk0.Exec(sql) - require.True(t, strings.Contains(err.Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForInstance)) + require.True(t, strings.Contains(err.Error(), "Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.")) runtime.GC() require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/memory/ReadMemStats")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/mockAnalyzeMergeWorkerSlowConsume")) @@ -96,7 +95,7 @@ func TestGlobalMemoryControlForPrepareAnalyze(t *testing.T) { require.NoError(t, err0) _, err1 := tk0.Exec(sqlExecute) // Killed and the WarnMsg is WarnMsgSuffixForInstance instead of WarnMsgSuffixForSingleQuery - require.True(t, strings.Contains(err1.Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForInstance)) + require.True(t, strings.Contains(err1.Error(), "Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.")) runtime.GC() require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/util/memory/ReadMemStats")) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/executor/mockAnalyzeMergeWorkerSlowConsume")) @@ -178,7 +177,7 @@ func TestGlobalMemoryControlForAutoAnalyze(t *testing.T) { h.HandleAutoAnalyze(dom.InfoSchema()) rs := tk.MustQuery("select fail_reason from mysql.analyze_jobs where table_name=? and state=? 
limit 1", "t", "failed") failReason := rs.Rows()[0][0].(string) - require.True(t, strings.Contains(failReason, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForInstance)) + require.True(t, strings.Contains(failReason, "Your query has been cancelled due to exceeding the allowed memory limit for the tidb-server instance and this query is currently using the most memory. Please try narrowing your query scope or increase the tidb_server_memory_limit and try again.")) childTrackers = executor.GlobalAnalyzeMemoryTracker.GetChildrenForTest() require.Len(t, childTrackers, 0) diff --git a/pkg/executor/test/autoidtest/BUILD.bazel b/pkg/executor/test/autoidtest/BUILD.bazel index 61c420b71bfff..de545e42f2320 100644 --- a/pkg/executor/test/autoidtest/BUILD.bazel +++ b/pkg/executor/test/autoidtest/BUILD.bazel @@ -9,14 +9,13 @@ go_test( ], flaky = True, race = "on", - shard_count = 10, + shard_count = 4, deps = [ "//pkg/autoid_service", "//pkg/config", "//pkg/ddl/testutil", "//pkg/meta/autoid", "//pkg/parser/mysql", - "//pkg/session", "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/testkit/testutil", diff --git a/pkg/executor/test/autoidtest/autoid_test.go b/pkg/executor/test/autoidtest/autoid_test.go index 06bf8bc80c215..adfab9906f580 100644 --- a/pkg/executor/test/autoidtest/autoid_test.go +++ b/pkg/executor/test/autoidtest/autoid_test.go @@ -16,7 +16,6 @@ package autoid_test import ( "context" - "fmt" "strconv" "strings" "testing" @@ -25,7 +24,6 @@ import ( _ "github.com/pingcap/tidb/pkg/autoid_service" ddltestutil "github.com/pingcap/tidb/pkg/ddl/testutil" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testutil" @@ -95,52 +93,6 @@ func TestFilterDifferentAllocators(t *testing.T) { } } -func TestAutoIncrementInsertMinMax(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - cases := []struct { - t string - s string - vals []int64 - expect [][]interface{} - }{ - {"tinyint", "signed", []int64{-128, 0, 127}, testkit.Rows("-128", "1", "2", "3", "127")}, - {"tinyint", "unsigned", []int64{0, 127, 255}, testkit.Rows("1", "2", "127", "128", "255")}, - {"smallint", "signed", []int64{-32768, 0, 32767}, testkit.Rows("-32768", "1", "2", "3", "32767")}, - {"smallint", "unsigned", []int64{0, 32767, 65535}, testkit.Rows("1", "2", "32767", "32768", "65535")}, - {"mediumint", "signed", []int64{-8388608, 0, 8388607}, testkit.Rows("-8388608", "1", "2", "3", "8388607")}, - {"mediumint", "unsigned", []int64{0, 8388607, 16777215}, testkit.Rows("1", "2", "8388607", "8388608", "16777215")}, - {"integer", "signed", []int64{-2147483648, 0, 2147483647}, testkit.Rows("-2147483648", "1", "2", "3", "2147483647")}, - {"integer", "unsigned", []int64{0, 2147483647, 4294967295}, testkit.Rows("1", "2", "2147483647", "2147483648", "4294967295")}, - {"bigint", "signed", []int64{-9223372036854775808, 0, 9223372036854775807}, testkit.Rows("-9223372036854775808", "1", "2", "3", "9223372036854775807")}, - {"bigint", "unsigned", []int64{0, 9223372036854775807}, testkit.Rows("1", "2", "9223372036854775807", "9223372036854775808")}, - } - - for _, option := range []string{"", "auto_id_cache 1", "auto_id_cache 100"} { - for idx, c := range cases { - sql := fmt.Sprintf("create table t%d (a %s %s key auto_increment) %s", idx, c.t, c.s, option) - tk.MustExec(sql) - - for _, val := range c.vals { - 
tk.MustExec(fmt.Sprintf("insert into t%d values (%d)", idx, val)) - tk.Exec(fmt.Sprintf("insert into t%d values ()", idx)) // ignore error - } - - tk.MustQuery(fmt.Sprintf("select * from t%d order by a", idx)).Check(c.expect) - - tk.MustExec(fmt.Sprintf("drop table t%d", idx)) - } - } - - tk.MustExec("create table t10 (a integer key auto_increment) auto_id_cache 1") - err := tk.ExecToErr("insert into t10 values (2147483648)") - require.Error(t, err) - err = tk.ExecToErr("insert into t10 values (-2147483649)") - require.Error(t, err) -} - func TestInsertWithAutoidSchema(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -154,14 +106,10 @@ func TestInsertWithAutoidSchema(t *testing.T) { tk.MustExec(`create table t7(id int primary key, n double unsigned auto_increment, key I_n(n));`) // test for inserting multiple values tk.MustExec(`create table t8(id int primary key auto_increment, n int);`) - testInsertWithAutoidSchema(t, tk) -} -func TestInsertWithAutoidSchemaCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) + // test for auto_id_cache = 1 + tk.MustExec(`drop table if exists t1, t2, t3, t4, t5, t6, t7, t8`) tk.MustExec(`create table t1(id int primary key auto_increment, n int) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t2(id int unsigned primary key auto_increment, n int) AUTO_ID_CACHE 1;`) tk.MustExec(`create table t3(id tinyint primary key auto_increment, n int) AUTO_ID_CACHE 1;`) @@ -171,7 +119,6 @@ func TestInsertWithAutoidSchemaCache(t *testing.T) { tk.MustExec(`create table t7(id int primary key, n double unsigned auto_increment, key I_n(n)) AUTO_ID_CACHE 1;`) // test for inserting multiple values tk.MustExec(`create table t8(id int primary key auto_increment, n int);`) - testInsertWithAutoidSchema(t, tk) } @@ -591,153 +538,6 @@ func testInsertWithAutoidSchema(t *testing.T, tk *testkit.TestKit) { } } -// TestAutoIDIncrementAndOffset There is a potential issue in MySQL: when the value of auto_increment_offset is greater -// than that of auto_increment_increment, the value of auto_increment_offset is ignored -// (https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment), -// This issue is a flaw of the implementation of MySQL and it doesn't exist in TiDB. -func TestAutoIDIncrementAndOffset(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - // Test for offset is larger than increment. - tk.Session().GetSessionVars().AutoIncrementIncrement = 5 - tk.Session().GetSessionVars().AutoIncrementOffset = 10 - - for _, str := range []string{"", " AUTO_ID_CACHE 1"} { - tk.MustExec(`create table io (a int key auto_increment)` + str) - tk.MustExec(`insert into io values (null),(null),(null)`) - tk.MustQuery(`select * from io`).Check(testkit.Rows("10", "15", "20")) - tk.MustExec(`drop table io`) - } - - // Test handle is PK. - for _, str := range []string{"", " AUTO_ID_CACHE 1"} { - tk.MustExec(`create table io (a int key auto_increment)` + str) - tk.Session().GetSessionVars().AutoIncrementOffset = 10 - tk.Session().GetSessionVars().AutoIncrementIncrement = 2 - tk.MustExec(`insert into io values (),(),()`) - tk.MustQuery(`select * from io`).Check(testkit.Rows("10", "12", "14")) - tk.MustExec(`delete from io`) - - // Test reset the increment. 
- tk.Session().GetSessionVars().AutoIncrementIncrement = 5 - tk.MustExec(`insert into io values (),(),()`) - tk.MustQuery(`select * from io`).Check(testkit.Rows("15", "20", "25")) - tk.MustExec(`delete from io`) - - tk.Session().GetSessionVars().AutoIncrementIncrement = 10 - tk.MustExec(`insert into io values (),(),()`) - tk.MustQuery(`select * from io`).Check(testkit.Rows("30", "40", "50")) - tk.MustExec(`delete from io`) - - tk.Session().GetSessionVars().AutoIncrementIncrement = 5 - tk.MustExec(`insert into io values (),(),()`) - tk.MustQuery(`select * from io`).Check(testkit.Rows("55", "60", "65")) - tk.MustExec(`drop table io`) - } - - // Test handle is not PK. - for _, str := range []string{"", " AUTO_ID_CACHE 1"} { - tk.Session().GetSessionVars().AutoIncrementIncrement = 2 - tk.Session().GetSessionVars().AutoIncrementOffset = 10 - tk.MustExec(`create table io (a int, b int auto_increment, key(b))` + str) - tk.MustExec(`insert into io(b) values (null),(null),(null)`) - // AutoID allocation will take increment and offset into consideration. - tk.MustQuery(`select b from io`).Check(testkit.Rows("10", "12", "14")) - if str == "" { - // HandleID allocation will ignore the increment and offset. - tk.MustQuery(`select _tidb_rowid from io`).Check(testkit.Rows("15", "16", "17")) - } else { - // Separate row id and auto inc id, increment and offset works on auto inc id - tk.MustQuery(`select _tidb_rowid from io`).Check(testkit.Rows("1", "2", "3")) - } - tk.MustExec(`delete from io`) - - tk.Session().GetSessionVars().AutoIncrementIncrement = 10 - tk.MustExec(`insert into io(b) values (null),(null),(null)`) - tk.MustQuery(`select b from io`).Check(testkit.Rows("20", "30", "40")) - if str == "" { - tk.MustQuery(`select _tidb_rowid from io`).Check(testkit.Rows("41", "42", "43")) - } else { - tk.MustQuery(`select _tidb_rowid from io`).Check(testkit.Rows("4", "5", "6")) - } - - // Test invalid value. - tk.Session().GetSessionVars().AutoIncrementIncrement = -1 - tk.Session().GetSessionVars().AutoIncrementOffset = -2 - tk.MustGetErrMsg(`insert into io(b) values (null),(null),(null)`, - "[autoid:8060]Invalid auto_increment settings: auto_increment_increment: -1, auto_increment_offset: -2, both of them must be in range [1..65535]") - tk.MustExec(`delete from io`) - - tk.Session().GetSessionVars().AutoIncrementIncrement = 65536 - tk.Session().GetSessionVars().AutoIncrementOffset = 65536 - tk.MustGetErrMsg(`insert into io(b) values (null),(null),(null)`, - "[autoid:8060]Invalid auto_increment settings: auto_increment_increment: 65536, auto_increment_offset: 65536, both of them must be in range [1..65535]") - - tk.MustExec(`drop table io`) - } -} - -func TestRenameTableForAutoIncrement(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("USE test;") - tk.MustExec("drop table if exists t1, t2, t3;") - tk.MustExec("create table t1 (id int key auto_increment);") - tk.MustExec("insert into t1 values ()") - tk.MustExec("rename table t1 to t11") - tk.MustExec("insert into t11 values ()") - // TODO(tiancaiamao): fix bug and uncomment here, rename table should not discard the cached AUTO_ID. - // tk.MustQuery("select * from t11").Check(testkit.Rows("1", "2")) - - // auto_id_cache 1 use another implementation and do not have such bug. 
- tk.MustExec("create table t2 (id int key auto_increment) auto_id_cache 1;") - tk.MustExec("insert into t2 values ()") - tk.MustExec("rename table t2 to t22") - tk.MustExec("insert into t22 values ()") - tk.MustQuery("select * from t22").Check(testkit.Rows("1", "2")) - - tk.MustExec("create table t3 (id int key auto_increment) auto_id_cache 100;") - tk.MustExec("insert into t3 values ()") - tk.MustExec("rename table t3 to t33") - tk.MustExec("insert into t33 values ()") - // TODO(tiancaiamao): fix bug and uncomment here, rename table should not discard the cached AUTO_ID. - // tk.MustQuery("select * from t33").Check(testkit.Rows("1", "2")) -} - -func TestAlterTableAutoIDCache(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("USE test;") - tk.MustExec("drop table if exists t_473;") - tk.MustExec("create table t_473 (id int key auto_increment)") - tk.MustExec("insert into t_473 values ()") - tk.MustQuery("select * from t_473").Check(testkit.Rows("1")) - rs, err := tk.Exec("show table t_473 next_row_id") - require.NoError(t, err) - rows, err1 := session.ResultSetToStringSlice(context.Background(), tk.Session(), rs) - require.NoError(t, err1) - // "test t_473 id 1013608 AUTO_INCREMENT" - val, err2 := strconv.ParseUint(rows[0][3], 10, 64) - require.NoError(t, err2) - - tk.MustExec("alter table t_473 auto_id_cache = 100") - tk.MustQuery("show table t_473 next_row_id").Check(testkit.Rows( - fmt.Sprintf("test t_473 id %d _TIDB_ROWID", val), - "test t_473 id 1 AUTO_INCREMENT", - )) - tk.MustExec("insert into t_473 values ()") - tk.MustQuery("select * from t_473").Check(testkit.Rows("1", fmt.Sprintf("%d", val))) - tk.MustQuery("show table t_473 next_row_id").Check(testkit.Rows( - fmt.Sprintf("test t_473 id %d _TIDB_ROWID", val+100), - "test t_473 id 1 AUTO_INCREMENT", - )) - - // Note that auto_id_cache=1 use a different implementation, switch between them is not allowed. - // TODO: relax this restriction and update the test case. - tk.MustExecToErr("alter table t_473 auto_id_cache = 1") -} - func TestMockAutoIDServiceError(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -767,42 +567,3 @@ func TestIssue39528(t *testing.T) { // Make sure the code does not visit tikv on allocate path. 
require.False(t, codeRun) } - -func TestAutoIDConstraint(t *testing.T) { - // Remove the constraint that auto id column must be defined as a key - // See https://github.com/pingcap/tidb/issues/40580 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - - // Cover: create table with/without key constraint - template := `create table t%d (id int auto_increment,k int,c char(120)%s) %s` - keyDefs := []string{"", - ",PRIMARY KEY(k, id)", - ",key idx_1(id)", - ",PRIMARY KEY(`k`, `id`), key idx_1(id)", - } - engineDefs := []string{"", - "engine = MyISAM", - "engine = InnoDB", - "auto_id_cache 1", - "auto_id_cache 100", - } - i := 0 - for _, keyDef := range keyDefs { - for _, engineDef := range engineDefs { - tk.MustExec(fmt.Sprintf("drop table if exists t%d", i)) - sql := fmt.Sprintf(template, i, keyDef, engineDef) - tk.MustExec(sql) - i++ - } - } - - // alter table add auto id column is not supported, but cover it here to prevent regression - tk.MustExec("create table tt1 (id int)") - tk.MustExecToErr("alter table tt1 add column (c int auto_increment)") - - // Cover case: create table with auto id column as key, and remove it later - tk.MustExec("create table tt2 (id int, c int auto_increment, key c_idx(c))") - tk.MustExec("alter table tt2 drop index c_idx") -} diff --git a/pkg/executor/test/executor/BUILD.bazel b/pkg/executor/test/executor/BUILD.bazel index 50001434b57b9..8046b5d761d57 100644 --- a/pkg/executor/test/executor/BUILD.bazel +++ b/pkg/executor/test/executor/BUILD.bazel @@ -40,6 +40,7 @@ go_test( "//pkg/testkit/testdata", "//pkg/types", "//pkg/util", + "//pkg/util/dbterror/exeerrors", "//pkg/util/memory", "//pkg/util/mock", "//pkg/util/replayer", diff --git a/pkg/executor/test/executor/executor_test.go b/pkg/executor/test/executor/executor_test.go index ae6af5226a609..42b59f2da13ac 100644 --- a/pkg/executor/test/executor/executor_test.go +++ b/pkg/executor/test/executor/executor_test.go @@ -61,6 +61,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit/testdata" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/mock" "github.com/pingcap/tidb/pkg/util/replayer" @@ -1886,34 +1887,34 @@ func TestOOMPanicAction(t *testing.T) { tk.MustExec("set @@tidb_mem_quota_query=1;") err := tk.QueryToErr("select sum(b) from t group by a;") require.Error(t, err) - require.Regexp(t, memory.PanicMemoryExceedWarnMsg, err.Error()) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) // Test insert from select oom panic. 
tk.MustExec("drop table if exists t,t1") tk.MustExec("create table t (a bigint);") tk.MustExec("create table t1 (a bigint);") tk.MustExec("set @@tidb_mem_quota_query=200;") - tk.MustMatchErrMsg("insert into t1 values (1),(2),(3),(4),(5);", memory.PanicMemoryExceedWarnMsg) - tk.MustMatchErrMsg("replace into t1 values (1),(2),(3),(4),(5);", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("insert into t1 values (1),(2),(3),(4),(5);"))) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("replace into t1 values (1),(2),(3),(4),(5);"))) tk.MustExec("set @@tidb_mem_quota_query=10000") tk.MustExec("insert into t1 values (1),(2),(3),(4),(5);") tk.MustExec("set @@tidb_mem_quota_query=10;") - tk.MustMatchErrMsg("insert into t select a from t1 order by a desc;", memory.PanicMemoryExceedWarnMsg) - tk.MustMatchErrMsg("replace into t select a from t1 order by a desc;", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("insert into t select a from t1 order by a desc;"))) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("replace into t select a from t1 order by a desc;"))) tk.MustExec("set @@tidb_mem_quota_query=10000") tk.MustExec("insert into t values (1),(2),(3),(4),(5);") // Set the memory quota to 244 to make this SQL panic during the DeleteExec // instead of the TableReaderExec. tk.MustExec("set @@tidb_mem_quota_query=244;") - tk.MustMatchErrMsg("delete from t", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("delete from t"))) tk.MustExec("set @@tidb_mem_quota_query=10000;") tk.MustExec("delete from t1") tk.MustExec("insert into t1 values(1)") tk.MustExec("insert into t values (1),(2),(3),(4),(5);") tk.MustExec("set @@tidb_mem_quota_query=244;") - tk.MustMatchErrMsg("delete t, t1 from t join t1 on t.a = t1.a", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("delete t, t1 from t join t1 on t.a = t1.a"))) tk.MustExec("set @@tidb_mem_quota_query=100000;") tk.MustExec("truncate table t") @@ -1921,7 +1922,7 @@ func TestOOMPanicAction(t *testing.T) { // set the memory to quota to make the SQL panic during UpdateExec instead // of TableReader. 
tk.MustExec("set @@tidb_mem_quota_query=244;") - tk.MustMatchErrMsg("update t set a = 4", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("update t set a = 4"))) } func TestPointGetPreparedPlan(t *testing.T) { @@ -2670,11 +2671,11 @@ func TestAdminShowDDLJobs(t *testing.T) { tk.MustExec(`set @@time_zone = 'Asia/Shanghai'`) re = tk.MustQuery("admin show ddl jobs where end_time is not NULL") row = re.Rows()[0] - createTime, err := types.ParseDatetime(nil, row[8].(string)) + createTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[8].(string)) require.NoError(t, err) - startTime, err := types.ParseDatetime(nil, row[9].(string)) + startTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[9].(string)) require.NoError(t, err) - endTime, err := types.ParseDatetime(nil, row[10].(string)) + endTime, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row[10].(string)) require.NoError(t, err) tk.MustExec(`set @@time_zone = 'Europe/Amsterdam'`) re = tk.MustQuery("admin show ddl jobs where end_time is not NULL") @@ -2682,11 +2683,11 @@ func TestAdminShowDDLJobs(t *testing.T) { require.NotEqual(t, row[8], row2[8]) require.NotEqual(t, row[9], row2[9]) require.NotEqual(t, row[10], row2[10]) - createTime2, err := types.ParseDatetime(nil, row2[8].(string)) + createTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[8].(string)) require.NoError(t, err) - startTime2, err := types.ParseDatetime(nil, row2[9].(string)) + startTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[9].(string)) require.NoError(t, err) - endTime2, err := types.ParseDatetime(nil, row2[10].(string)) + endTime2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, row2[10].(string)) require.NoError(t, err) loc, err := time.LoadLocation("Asia/Shanghai") require.NoError(t, err) @@ -3954,7 +3955,7 @@ func TestSummaryFailedUpdate(t *testing.T) { tk.MustExec("SET GLOBAL tidb_mem_oom_action='CANCEL'") require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) tk.MustExec("set @@tidb_mem_quota_query=1") - tk.MustMatchErrMsg("update t set t.a = t.a - 1 where t.a in (select a from t where a < 4)", memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr("update t set t.a = t.a - 1 where t.a in (select a from t where a < 4)"))) tk.MustExec("set @@tidb_mem_quota_query=1000000000") tk.MustQuery("select stmt_type from information_schema.statements_summary where digest_text = 'update `t` set `t` . `a` = `t` . `a` - ? where `t` . `a` in ( select `a` from `t` where `a` < ? 
)'").Check(testkit.Rows("Update")) } @@ -4085,7 +4086,7 @@ func TestGlobalMemoryControl2(t *testing.T) { wg.Done() }() sql := "select * from t t1 join t t2 join t t3 on t1.a=t2.a and t1.a=t3.a order by t1.a;" // Need 500MB - require.True(t, strings.Contains(tk0.QueryToErr(sql).Error(), memory.PanicMemoryExceedWarnMsg)) + require.True(t, exeerrors.ErrMemoryExceedForInstance.Equal(tk0.QueryToErr(sql))) require.Equal(t, tk0.Session().GetSessionVars().DiskTracker.MaxConsumed(), int64(0)) wg.Wait() test[0] = 0 @@ -4104,7 +4105,7 @@ func TestCompileOutOfMemoryQuota(t *testing.T) { tk.MustExec("create table t1(a int, c int, index idx(a))") tk.MustExec("set tidb_mem_quota_query=10") err := tk.ExecToErr("select t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a") - require.Contains(t, err.Error(), memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func TestSignalCheckpointForSort(t *testing.T) { @@ -4130,7 +4131,7 @@ func TestSignalCheckpointForSort(t *testing.T) { tk.Session().GetSessionVars().ConnectionID = 123456 err := tk.QueryToErr("select * from t order by a") - require.Contains(t, err.Error(), memory.PanicMemoryExceedWarnMsg) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func TestSessionRootTrackerDetach(t *testing.T) { @@ -4142,7 +4143,9 @@ func TestSessionRootTrackerDetach(t *testing.T) { tk.MustExec("create table t(a int, b int, index idx(a))") tk.MustExec("create table t1(a int, c int, index idx(a))") tk.MustExec("set tidb_mem_quota_query=10") - tk.MustContainErrMsg("select /*+hash_join(t1)*/ t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a", memory.PanicMemoryExceedWarnMsg) + err := tk.ExecToErr("select /*+hash_join(t1)*/ t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a") + fmt.Println(err.Error()) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) tk.MustExec("set tidb_mem_quota_query=1000") rs, err := tk.Exec("select /*+hash_join(t1)*/ t.a, t1.a from t use index(idx), t1 use index(idx) where t.a = t1.a") require.NoError(t, err) diff --git a/pkg/executor/test/fktest/BUILD.bazel b/pkg/executor/test/fktest/BUILD.bazel index 27434096bbbe9..f4a92763b90ee 100644 --- a/pkg/executor/test/fktest/BUILD.bazel +++ b/pkg/executor/test/fktest/BUILD.bazel @@ -25,7 +25,6 @@ go_test( "//pkg/testkit", "//pkg/types", "//pkg/util/dbterror/exeerrors", - "//pkg/util/memory", "//pkg/util/sqlexec", "//tests/realtikvtest", "@com_github_stretchr_testify//require", diff --git a/pkg/executor/test/fktest/foreign_key_test.go b/pkg/executor/test/fktest/foreign_key_test.go index 17e086f3154ad..4c074b1a379fe 100644 --- a/pkg/executor/test/fktest/foreign_key_test.go +++ b/pkg/executor/test/fktest/foreign_key_test.go @@ -38,7 +38,6 @@ import ( "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" - "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/tests/realtikvtest" "github.com/stretchr/testify/require" @@ -2530,7 +2529,7 @@ func TestForeignKeyAndMemoryTracker(t *testing.T) { // foreign key cascade behaviour will exceed memory quota. 
err := tk.ExecToErr("update t1 set id=id+100000 where id=1") require.Error(t, err) - require.Contains(t, err.Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) tk.MustQuery("select id,pid from t1 where id = 1").Check(testkit.Rows("1 ")) tk.MustExec("set @@foreign_key_checks=0") // After disable foreign_key_checks, following DML will execute successful. diff --git a/pkg/executor/test/indexmergereadtest/BUILD.bazel b/pkg/executor/test/indexmergereadtest/BUILD.bazel index 8aaee06a26f1c..ce626d630163f 100644 --- a/pkg/executor/test/indexmergereadtest/BUILD.bazel +++ b/pkg/executor/test/indexmergereadtest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 36, + shard_count = 37, deps = [ "//pkg/config", "//pkg/executor", @@ -18,7 +18,7 @@ go_test( "//pkg/testkit", "//pkg/testkit/testutil", "//pkg/util", - "//pkg/util/memory", + "//pkg/util/dbterror/exeerrors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//tikv", diff --git a/pkg/executor/test/indexmergereadtest/index_merge_reader_test.go b/pkg/executor/test/indexmergereadtest/index_merge_reader_test.go index 92ef516dd58df..6049727b18ed6 100644 --- a/pkg/executor/test/indexmergereadtest/index_merge_reader_test.go +++ b/pkg/executor/test/indexmergereadtest/index_merge_reader_test.go @@ -33,7 +33,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/testutil" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) @@ -164,27 +164,35 @@ func TestPartitionTableRandomIndexMerge(t *testing.T) { partition p4 values less than (40))`) tk.MustExec(`create table tnormal (a int, b int, key(a), key(b))`) - values := make([]string, 0, 128) - for i := 0; i < 128; i++ { - values = append(values, fmt.Sprintf("(%v, %v)", rand.Intn(40), rand.Intn(40))) + values := make([]string, 0, 32) + for i := 0; i < 32; i++ { + values = append(values, fmt.Sprintf("(%v, %v)", rand.Intn(10), rand.Intn(10))) } tk.MustExec(fmt.Sprintf("insert into t values %v", strings.Join(values, ", "))) tk.MustExec(fmt.Sprintf("insert into tnormal values %v", strings.Join(values, ", "))) randRange := func() (int, int) { - a, b := rand.Intn(40), rand.Intn(40) + a, b := rand.Intn(10), rand.Intn(10) if a > b { return b, a } return a, b } - for i := 0; i < 256; i++ { + for i := 0; i < 32; i++ { la, ra := randRange() lb, rb := randRange() cond := fmt.Sprintf("(a between %v and %v) or (b between %v and %v)", la, ra, lb, rb) result := tk.MustQuery("select * from tnormal where " + cond).Sort().Rows() tk.MustQuery("select /*+ USE_INDEX_MERGE(t, a, b) */ * from t where " + cond).Sort().Check(result) } +} + +func TestPartitionTableRandomIndexMerge2(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@tidb_enable_index_merge=1") + tk.MustExec("set @@tidb_partition_prune_mode='dynamic'") // test a table with a primary key tk.MustExec(`create table tpk (a int primary key, b int, key(b)) @@ -193,15 +201,22 @@ func TestPartitionTableRandomIndexMerge(t *testing.T) { partition p2 values less than (20), partition p3 values less than (30), partition p4 values less than (40))`) - tk.MustExec("truncate tnormal") + tk.MustExec(`create table tnormal (a int, b int, key(a), key(b))`) - values = 
values[:0] - for i := 0; i < 40; i++ { - values = append(values, fmt.Sprintf("(%v, %v)", i, rand.Intn(40))) + randRange := func() (int, int) { + a, b := rand.Intn(10), rand.Intn(10) + if a > b { + return b, a + } + return a, b + } + values := make([]string, 0, 10) + for i := 0; i < 10; i++ { + values = append(values, fmt.Sprintf("(%v, %v)", i, rand.Intn(10))) } tk.MustExec(fmt.Sprintf("insert into tpk values %v", strings.Join(values, ", "))) tk.MustExec(fmt.Sprintf("insert into tnormal values %v", strings.Join(values, ", "))) - for i := 0; i < 256; i++ { + for i := 0; i < 32; i++ { la, ra := randRange() lb, rb := randRange() cond := fmt.Sprintf("(a between %v and %v) or (b between %v and %v)", la, ra, lb, rb) @@ -800,7 +815,7 @@ func TestIntersectionMemQuota(t *testing.T) { defer tk.MustExec("set global tidb_mem_oom_action = DEFAULT") tk.MustExec("set @@tidb_mem_quota_query = 4000") err := tk.QueryToErr("select /*+ use_index_merge(t1, primary, idx1, idx2) */ c1 from t1 where c1 < 1024 and c2 < 1024") - require.Contains(t, err.Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func setupPartitionTableHelper(tk *testkit.TestKit) { diff --git a/pkg/executor/test/issuetest/BUILD.bazel b/pkg/executor/test/issuetest/BUILD.bazel index 3810bcaefa89c..8c3892640c44f 100644 --- a/pkg/executor/test/issuetest/BUILD.bazel +++ b/pkg/executor/test/issuetest/BUILD.bazel @@ -20,6 +20,7 @@ go_test( "//pkg/session", "//pkg/testkit", "//pkg/util", + "//pkg/util/dbterror/exeerrors", "//pkg/util/memory", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", diff --git a/pkg/executor/test/issuetest/executor_issue_test.go b/pkg/executor/test/issuetest/executor_issue_test.go index f47d02e592c57..96d12ba48332c 100644 --- a/pkg/executor/test/issuetest/executor_issue_test.go +++ b/pkg/executor/test/issuetest/executor_issue_test.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/memory" "github.com/stretchr/testify/require" ) @@ -161,9 +162,9 @@ func TestIssue28650(t *testing.T) { tk.MustExec("set @@tidb_mem_quota_query = 1073741824") // 1GB require.Nil(t, tk.QueryToErr(sql)) tk.MustExec("set @@tidb_mem_quota_query = 33554432") // 32MB, out of memory during executing - require.True(t, strings.Contains(tk.QueryToErr(sql).Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery)) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.QueryToErr(sql))) tk.MustExec("set @@tidb_mem_quota_query = 65536") // 64KB, out of memory during building the plan - require.True(t, strings.Contains(tk.ExecToErr(sql).Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery)) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(tk.ExecToErr(sql))) } } @@ -316,10 +317,10 @@ func TestIndexJoin31494(t *testing.T) { for i := 0; i < 10; i++ { err := tk.QueryToErr("select /*+ inl_join(t1) */ * from t1 right join t2 on t1.b=t2.b;") require.Error(t, err) - require.Regexp(t, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery, err.Error()) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) err = tk.QueryToErr("select /*+ inl_hash_join(t1) */ * from t1 right join t2 on t1.b=t2.b;") require.Error(t, err) - require.Regexp(t, 
memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery, err.Error()) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } } diff --git a/pkg/executor/test/jointest/BUILD.bazel b/pkg/executor/test/jointest/BUILD.bazel index 81f76b3f6f938..5110896f6dff0 100644 --- a/pkg/executor/test/jointest/BUILD.bazel +++ b/pkg/executor/test/jointest/BUILD.bazel @@ -9,14 +9,14 @@ go_test( ], flaky = True, race = "on", - shard_count = 11, + shard_count = 12, deps = [ "//pkg/config", "//pkg/meta/autoid", "//pkg/session", "//pkg/testkit", "//pkg/util", - "//pkg/util/memory", + "//pkg/util/dbterror/exeerrors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@com_github_tikv_client_go_v2//tikv", diff --git a/pkg/executor/test/jointest/hashjoin/BUILD.bazel b/pkg/executor/test/jointest/hashjoin/BUILD.bazel index 9459bb1e3014b..2f147f677cc20 100644 --- a/pkg/executor/test/jointest/hashjoin/BUILD.bazel +++ b/pkg/executor/test/jointest/hashjoin/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 14, + shard_count = 10, deps = [ "//pkg/config", "//pkg/meta/autoid", diff --git a/pkg/executor/test/jointest/hashjoin/hash_join_test.go b/pkg/executor/test/jointest/hashjoin/hash_join_test.go index c7a399892474c..5cedd58288acb 100644 --- a/pkg/executor/test/jointest/hashjoin/hash_join_test.go +++ b/pkg/executor/test/jointest/hashjoin/hash_join_test.go @@ -170,31 +170,6 @@ func TestIndexNestedLoopHashJoin(t *testing.T) { tk.MustExec("drop table orders") } -func TestIssue13449(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t, s;") - tk.MustExec("create table t(a int, index(a));") - tk.MustExec("create table s(a int, index(a));") - for i := 1; i <= 128; i++ { - tk.MustExec(fmt.Sprintf("insert into t values(%d)", i)) - } - tk.MustExec("insert into s values(1), (128)") - tk.MustExec("set @@tidb_max_chunk_size=32;") - tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;") - tk.MustExec("set @@tidb_index_join_batch_size=32;") - - tk.MustQuery("desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a;").Check(testkit.Rows( - "IndexHashJoin 12487.50 root inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a)", - "├─IndexReader(Build) 9990.00 root index:IndexFullScan", - "│ └─IndexFullScan 9990.00 cop[tikv] table:t, index:a(a) keep order:true, stats:pseudo", - "└─IndexReader(Probe) 12487.50 root index:Selection", - " └─Selection 12487.50 cop[tikv] not(isnull(test.s.a))", - " └─IndexRangeScan 12500.00 cop[tikv] table:s, index:a(a) range: decided by [eq(test.s.a, test.t.a)], keep order:false, stats:pseudo")) - tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a;").Check(testkit.Rows("1 1", "128 128")) -} - func TestHashJoin(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -387,6 +362,27 @@ func TestExplainAnalyzeJoin(t *testing.T) { require.Len(t, rows, 9) require.Regexp(t, "IndexMergeJoin_.*", rows[0][0]) require.Regexp(t, fmt.Sprintf(".*Concurrency:%v.*", tk.Session().GetSessionVars().IndexLookupJoinConcurrency()), rows[0][5]) + + // TestExplainAnalyzeIndexHashJoin + // Issue 43597 + tk.MustExec("drop table if exists t1;") + tk.MustExec("create table t (a int, index idx(a));") + sql := "insert into t values" + for i := 0; i <= 1024; i++ { + if i != 0 { + sql += "," + } + sql 
+= fmt.Sprintf("(%d)", i) + } + tk.MustExec(sql) + for i := 0; i <= 10; i++ { + // Test for index lookup hash join. + rows := tk.MustQuery("explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ * from t t1 join t t2 on t1.a=t2.a limit 1;").Rows() + require.Equal(t, 7, len(rows)) + require.Regexp(t, "IndexHashJoin.*", rows[1][0]) + // When innerWorkerRuntimeStats.join is negative, `join:` will not print. + require.Regexp(t, "time:.*, loops:.*, inner:{total:.*, concurrency:.*, task:.*, construct:.*, fetch:.*, build:.*, join:.*}", rows[1][5]) + } } func TestIssue20270(t *testing.T) { @@ -465,232 +461,3 @@ func TestIssue31129(t *testing.T) { require.NoError(t, failpoint.Disable(fpName1)) require.NoError(t, failpoint.Disable(fpName2)) } - -func TestHashJoinExecEncodeDecodeRow(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("drop table if exists t2") - tk.MustExec("create table t1 (id int)") - tk.MustExec("create table t2 (id int, name varchar(255), ts timestamp)") - tk.MustExec("insert into t1 values (1)") - tk.MustExec("insert into t2 values (1, 'xxx', '2003-06-09 10:51:26')") - result := tk.MustQuery("select ts from t1 inner join t2 where t2.name = 'xxx'") - result.Check(testkit.Rows("2003-06-09 10:51:26")) -} - -func TestIndexLookupJoin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("set tidb_cost_model_version=2") - tk.MustExec("set @@tidb_init_chunk_size=2") - tk.MustExec("DROP TABLE IF EXISTS t") - tk.MustExec("CREATE TABLE `t` (`a` int, pk integer auto_increment,`b` char (20),primary key (pk))") - tk.MustExec("CREATE INDEX idx_t_a ON t(`a`)") - tk.MustExec("CREATE INDEX idx_t_b ON t(`b`)") - tk.MustExec("INSERT INTO t VALUES (148307968, DEFAULT, 'nndsjofmpdxvhqv') , (-1327693824, DEFAULT, 'pnndsjofmpdxvhqvfny') , (-277544960, DEFAULT, 'fpnndsjo')") - - tk.MustExec("DROP TABLE IF EXISTS s") - tk.MustExec("CREATE TABLE `s` (`a` int, `b` char (20))") - tk.MustExec("CREATE INDEX idx_s_a ON s(`a`)") - tk.MustExec("INSERT INTO s VALUES (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (2, 'vtdiockfpn'), (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (6, 'ckfp')") - tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960")) - - tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a").Sort().Check(testkit.Rows("-1327693824", "-277544960", "-277544960", "148307968")) - - tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960")) - 
tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960").Sort().Check(testkit.Rows("-277544960", "-277544960")) - - tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "", "", "", "")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "", "", "", "")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t right join s on t.a = s.a").Sort().Check(testkit.Rows("-277544960", "-277544960", "", "", "", "")) - - tk.MustQuery("select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824")) - tk.MustQuery("select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824")) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc").Check(testkit.Rows("148307968", "-277544960", "-277544960", "-1327693824")) - - tk.MustExec("DROP TABLE IF EXISTS t;") - tk.MustExec("CREATE TABLE t(a BIGINT PRIMARY KEY, b BIGINT);") - tk.MustExec("INSERT INTO t VALUES(1, 2);") - tk.MustQuery("SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2")) - tk.MustQuery("SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2")) - tk.MustQuery("SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a;").Check(testkit.Rows("1 2 1 2", "1 2 1 2")) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(a decimal(6,2), index idx(a));`) - tk.MustExec(`insert into t values(1.01), (2.02), (NULL);`) - tk.MustQuery(`select /*+ INL_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows( - `1.01`, - `2.02`, - )) - tk.MustQuery(`select /*+ INL_HASH_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows( - `1.01`, - `2.02`, - )) - tk.MustQuery(`select /*+ INL_MERGE_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a;`).Check(testkit.Rows( - `1.01`, - `2.02`, - )) - - tk.MustExec(`drop table if exists t;`) - tk.MustExec(`create table t(a bigint, b bigint, unique key idx1(a, b));`) - tk.MustExec(`insert into t values(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6);`) - tk.MustExec(`set @@tidb_init_chunk_size = 2;`) - tk.MustQuery(`select /*+ INL_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows( - `1 1 `, - `1 2 `, - `1 3 `, - `1 4 `, - `1 5 1 1`, - `1 6 1 2`, - )) - tk.MustQuery(`select /*+ INL_HASH_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows( - `1 1 `, - `1 2 `, - `1 3 `, - `1 4 `, - `1 5 1 1`, - `1 6 1 2`, - )) - tk.MustQuery(`select /*+ INL_MERGE_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4;`).Check(testkit.Rows( - `1 1 `, - `1 2 `, - `1 3 `, - `1 4 `, - `1 5 1 1`, - `1 6 1 2`, - )) - - tk.MustExec(`drop table if exists t1, t2, t3;`) - tk.MustExec("create table t1(a int primary key, b int)") - 
tk.MustExec("insert into t1 values(1, 0), (2, null)") - tk.MustExec("create table t2(a int primary key)") - tk.MustExec("insert into t2 values(0)") - tk.MustQuery("select /*+ INL_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows( - `1 0 0`, - `2 `, - )) - tk.MustQuery("select /*+ INL_HASH_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows( - `1 0 0`, - `2 `, - )) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a;").Sort().Check(testkit.Rows( - `1 0 0`, - `2 `, - )) - - tk.MustExec("create table t3(a int, key(a))") - tk.MustExec("insert into t3 values(0)") - tk.MustQuery("select /*+ INL_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows( - `1 0 0`, - `2 `, - )) - tk.MustQuery("select /*+ INL_HASH_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows( - `1 0 0`, - `2 `, - )) - tk.MustQuery("select /*+ INL_MERGE_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a;").Check(testkit.Rows( - `2 `, - `1 0 0`, - )) - - tk.MustExec("drop table if exists t,s") - tk.MustExec("create table t(a int primary key auto_increment, b time)") - tk.MustExec("create table s(a int, b time)") - tk.MustExec("alter table s add index idx(a,b)") - tk.MustExec("set @@tidb_index_join_batch_size=4;set @@tidb_init_chunk_size=1;set @@tidb_max_chunk_size=32; set @@tidb_index_lookup_join_concurrency=15;") - tk.MustExec("set @@session.tidb_executor_concurrency = 4;") - tk.MustExec("set @@session.tidb_hash_join_concurrency = 5;") - - // insert 64 rows into `t` - tk.MustExec("insert into t values(0, '01:01:01')") - for i := 0; i < 6; i++ { - tk.MustExec("insert into t select 0, b + 1 from t") - } - tk.MustExec("insert into s select a, b - 1 from t") - tk.MustExec("analyze table t;") - tk.MustExec("analyze table s;") - - tk.MustQuery("desc format = 'brief' select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows( - "HashAgg 1.00 root funcs:count(1)->Column#6", - "└─IndexJoin 64.00 root inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a), other cond:lt(test.s.b, test.t.b)", - " ├─TableReader(Build) 64.00 root data:Selection", - " │ └─Selection 64.00 cop[tikv] not(isnull(test.t.b))", - " │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false", - " └─IndexReader(Probe) 64.00 root index:Selection", - " └─Selection 64.00 cop[tikv] not(isnull(test.s.a)), not(isnull(test.s.b))", - " └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:false")) - tk.MustQuery("select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;") - tk.MustQuery("select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - - tk.MustQuery("desc format = 'brief' select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows( - "HashAgg 1.00 root funcs:count(1)->Column#6", - "└─IndexMergeJoin 64.00 root inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, other cond:lt(test.s.b, test.t.b)", - " ├─TableReader(Build) 64.00 root data:Selection", - " │ └─Selection 64.00 cop[tikv] not(isnull(test.t.b))", - " │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false", - " └─IndexReader(Probe) 64.00 root 
index:Selection", - " └─Selection 64.00 cop[tikv] not(isnull(test.s.a)), not(isnull(test.s.b))", - " └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:true", - )) - tk.MustQuery("select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;") - tk.MustQuery("select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - - tk.MustQuery("desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows( - "HashAgg 1.00 root funcs:count(1)->Column#6", - "└─IndexHashJoin 64.00 root inner join, inner:IndexReader, outer key:test.t.a, inner key:test.s.a, equal cond:eq(test.t.a, test.s.a), other cond:lt(test.s.b, test.t.b)", - " ├─TableReader(Build) 64.00 root data:Selection", - " │ └─Selection 64.00 cop[tikv] not(isnull(test.t.b))", - " │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false", - " └─IndexReader(Probe) 64.00 root index:Selection", - " └─Selection 64.00 cop[tikv] not(isnull(test.s.a)), not(isnull(test.s.b))", - " └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(test.s.a, test.t.a) lt(test.s.b, test.t.b)], keep order:false", - )) - tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - tk.MustExec("set @@tidb_index_lookup_join_concurrency=1;") - tk.MustQuery("select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b").Check(testkit.Rows("64")) - - // issue15658 - tk.MustExec("drop table t1, t2") - tk.MustExec("create table t1(id int primary key)") - tk.MustExec("create table t2(a int, b int)") - tk.MustExec("insert into t1 values(1)") - tk.MustExec("insert into t2 values(1,1),(2,1)") - tk.MustQuery("select /*+ inl_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1")) - tk.MustQuery("select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1")) - tk.MustQuery("select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id;").Check(testkit.Rows("1 1 1")) -} - -func TestExplainAnalyzeIndexHashJoin(t *testing.T) { - // Issue 43597 - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t (a int, index idx(a));") - sql := "insert into t values" - for i := 0; i <= 1024; i++ { - if i != 0 { - sql += "," - } - sql += fmt.Sprintf("(%d)", i) - } - tk.MustExec(sql) - for i := 0; i <= 10; i++ { - // Test for index lookup hash join. - rows := tk.MustQuery("explain analyze select /*+ INL_HASH_JOIN(t1, t2) */ * from t t1 join t t2 on t1.a=t2.a limit 1;").Rows() - require.Equal(t, 7, len(rows)) - require.Regexp(t, "IndexHashJoin.*", rows[1][0]) - // When innerWorkerRuntimeStats.join is negative, `join:` will not print. 
- require.Regexp(t, "time:.*, loops:.*, inner:{total:.*, concurrency:.*, task:.*, construct:.*, fetch:.*, build:.*, join:.*}", rows[1][5]) - } -} diff --git a/pkg/executor/test/jointest/join_test.go b/pkg/executor/test/jointest/join_test.go index c60f1a061785e..a8a634c674ae6 100644 --- a/pkg/executor/test/jointest/join_test.go +++ b/pkg/executor/test/jointest/join_test.go @@ -26,7 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/stretchr/testify/require" ) @@ -1292,14 +1292,16 @@ func TestIssue18070(t *testing.T) { tk.MustExec("insert into t1 values(1),(2)") tk.MustExec("insert into t2 values(1),(1),(2),(2)") tk.MustExec("set @@tidb_mem_quota_query=1000") - tk.MustContainErrMsg("select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t1.a = t2.a;", memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery) + err := tk.ExecToErr("select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t1.a = t2.a;") + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) fpName := "github.com/pingcap/tidb/pkg/executor/mockIndexMergeJoinOOMPanic" require.NoError(t, failpoint.Enable(fpName, `panic("ERROR 1105 (HY000): Out Of Memory Quota![conn=1]")`)) defer func() { require.NoError(t, failpoint.Disable(fpName)) }() - tk.MustContainErrMsg("select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t1.a = t2.a;", memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery) + err = tk.ExecToErr("select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t1.a = t2.a;") + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func TestIssue20779(t *testing.T) { @@ -1348,10 +1350,10 @@ func TestIssue30211(t *testing.T) { tk.MustExec("set tidb_index_join_batch_size = 1;") tk.MustExec("SET GLOBAL tidb_mem_oom_action = 'CANCEL'") defer tk.MustExec("SET GLOBAL tidb_mem_oom_action='LOG'") - err := tk.QueryToErr("select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a;").Error() - require.True(t, strings.Contains(err, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery)) - err = tk.QueryToErr("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a;").Error() - require.True(t, strings.Contains(err, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery)) + err := tk.QueryToErr("select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a;") + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) + err = tk.QueryToErr("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a;") + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func TestIssue37932(t *testing.T) { @@ -1435,3 +1437,19 @@ func TestIssue37932(t *testing.T) { } require.NoError(t, err) } + +func TestCartesianJoinPanic(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("create table t(a int)") + tk.MustExec("insert into t values(1)") + tk.MustExec("set tidb_mem_quota_query = 1 << 20") + tk.MustExec("set global tidb_mem_oom_action = 'CANCEL'") + tk.MustExec("set global tidb_enable_tmp_storage_on_oom = off;") + for i := 0; i < 10; i++ { + tk.MustExec("insert into t select * from t") + } + err := tk.QueryToErr("desc analyze select * from t t1, t t2, t t3, t t4, t t5, t t6;") + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) +} diff --git a/pkg/executor/test/partitiontest/BUILD.bazel 
b/pkg/executor/test/partitiontest/BUILD.bazel index 1f2f0adfeeb71..eba32c340a02d 100644 --- a/pkg/executor/test/partitiontest/BUILD.bazel +++ b/pkg/executor/test/partitiontest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 5, + shard_count = 4, deps = [ "//pkg/testkit", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/executor/test/partitiontest/partition_test.go b/pkg/executor/test/partitiontest/partition_test.go index 28e9002060e27..cbffd97769b9e 100644 --- a/pkg/executor/test/partitiontest/partition_test.go +++ b/pkg/executor/test/partitiontest/partition_test.go @@ -447,57 +447,3 @@ func TestPartitionedTableDelete(t *testing.T) { tk.CheckExecResult(1, 0) tk.MustExec(`drop table t1;`) } - -func TestPartitionOnMissing(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create schema OnMissing") - tk.MustExec("use OnMissing") - tk.MustExec(`set global tidb_partition_prune_mode='dynamic'`) - tk.MustExec(`set session tidb_partition_prune_mode='dynamic'`) - - tk.MustExec(`CREATE TABLE tt1 ( - id INT NOT NULL, - listid INT, - name varchar(10), - primary key (listid) clustered - ) - PARTITION BY LIST (listid) ( - PARTITION p1 VALUES IN (1), - PARTITION p2 VALUES IN (2), - PARTITION p3 VALUES IN (3), - PARTITION p4 VALUES IN (4) - )`) - - tk.MustExec(`CREATE TABLE tt2 ( - id INT NOT NULL, - listid INT - )`) - - tk.MustExec(`create index idx_listid on tt1(id,listid)`) - tk.MustExec(`create index idx_listid on tt2(listid)`) - - tk.MustExec(`insert into tt1 values(1,1,1)`) - tk.MustExec(`insert into tt1 values(2,2,2)`) - tk.MustExec(`insert into tt1 values(3,3,3)`) - tk.MustExec(`insert into tt1 values(4,4,4)`) - tk.MustExec(`insert into tt2 values(1,1)`) - tk.MustExec(`insert into tt2 values(2,2)`) - tk.MustExec(`insert into tt2 values(3,3)`) - tk.MustExec(`insert into tt2 values(4,4)`) - tk.MustExec(`insert into tt2 values(5,5)`) - - tk.MustExec(`analyze table tt1`) - tk.MustExec(`analyze table tt2`) - - tk.MustQuery(`select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id`).Check(testkit.Rows("5")) - tk.MustQuery(`select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid`).Check(testkit.Rows("5")) - tk.MustQuery(`explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2 - left join tt1 on tt1.listid=tt2.listid`).Check(testkit.Rows(""+ - "StreamAgg 1.00 root funcs:count(Column#13)->Column#7", - "└─IndexReader 1.00 root index:StreamAgg", - " └─StreamAgg 1.00 cop[tikv] funcs:count(1)->Column#13", - " └─IndexFullScan 5.00 cop[tikv] table:tt2, index:idx_listid(listid) keep order:false")) -} diff --git a/pkg/executor/test/passwordtest/BUILD.bazel b/pkg/executor/test/passwordtest/BUILD.bazel index 6d9d3c6cc187d..9763605022218 100644 --- a/pkg/executor/test/passwordtest/BUILD.bazel +++ b/pkg/executor/test/passwordtest/BUILD.bazel @@ -18,7 +18,7 @@ go_test( "//pkg/privilege/privileges", "//pkg/sessionctx/variable", "//pkg/testkit", - "//pkg/util/sqlexec", + "//pkg/util/sqlescape", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], diff --git a/pkg/executor/test/passwordtest/password_management_test.go b/pkg/executor/test/passwordtest/password_management_test.go index e1d884b92bc55..59b6f04cb44f3 100644 --- a/pkg/executor/test/passwordtest/password_management_test.go +++ b/pkg/executor/test/passwordtest/password_management_test.go @@ -32,7 +32,7 @@ import ( 
"github.com/pingcap/tidb/pkg/privilege/privileges" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/stretchr/testify/require" ) @@ -1118,7 +1118,7 @@ func TestFailedLoginTrackingAlterUser(t *testing.T) { rootTK.MustExec(`CREATE USER test1 IDENTIFIED BY '1234' FAILED_LOGIN_ATTEMPTS 3 PASSWORD_LOCK_TIME 3 COMMENT 'test'`) err = tk.Session().Auth(&auth.UserIdentity{Username: "test1", Hostname: "%"}, sha1Password("1234"), nil, nil) require.NoError(t, err) - sqlexec.MustFormatSQL(sql, checkUserAttributes, "test1", "%") + sqlescape.MustFormatSQL(sql, checkUserAttributes, "test1", "%") rootTK.MustQuery(sql.String()).Check(testkit.Rows(`3 3 {"comment": "test"}`)) tk = testkit.NewTestKit(t, store) err = tk.Session().Auth(&auth.UserIdentity{Username: "test1", Hostname: "%"}, sha1Password(""), nil, nil) @@ -1284,7 +1284,7 @@ func checkUserUserAttributes(tk *testkit.TestKit, user, host, row string) { "JSON_EXTRACT(user_attributes, '$.Password_locking.password_lock_time_days')," + "JSON_EXTRACT(user_attributes, '$.metadata')from mysql.user where user= %? and host = %?" userAttributesSQL := new(strings.Builder) - sqlexec.MustFormatSQL(userAttributesSQL, sqlTemplate, user, host) + sqlescape.MustFormatSQL(userAttributesSQL, sqlTemplate, user, host) tk.MustQuery(userAttributesSQL.String()).Check(testkit.Rows(row)) } @@ -1341,7 +1341,7 @@ func checkAuthUser(t *testing.T, tk *testkit.TestKit, user string, failedLoginCo func selectSQL(user string) string { userAttributesSQL := new(strings.Builder) - sqlexec.MustFormatSQL(userAttributesSQL, "SELECT user_attributes from mysql.user WHERE USER = %? AND HOST = 'localhost' for update", user) + sqlescape.MustFormatSQL(userAttributesSQL, "SELECT user_attributes from mysql.user WHERE USER = %? 
AND HOST = 'localhost' for update", user) return userAttributesSQL.String() } diff --git a/pkg/executor/test/seqtest/BUILD.bazel b/pkg/executor/test/seqtest/BUILD.bazel index 7367f81c9de15..644e9f4aa79ef 100644 --- a/pkg/executor/test/seqtest/BUILD.bazel +++ b/pkg/executor/test/seqtest/BUILD.bazel @@ -10,7 +10,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 37, + shard_count = 33, deps = [ "//pkg/config", "//pkg/ddl/testutil", diff --git a/pkg/executor/test/seqtest/seq_executor_test.go b/pkg/executor/test/seqtest/seq_executor_test.go index 9b2885da5b97d..363f97c3b2ec4 100644 --- a/pkg/executor/test/seqtest/seq_executor_test.go +++ b/pkg/executor/test/seqtest/seq_executor_test.go @@ -1188,72 +1188,6 @@ func TestCoprocessorPriority(t *testing.T) { cli.mu.Unlock() } -func TestShowForNewCollations(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - expectRows := testkit.Rows( - "ascii_bin ascii 65 Yes Yes 1", - "binary binary 63 Yes Yes 1", - "gbk_bin gbk 87 Yes 1", - "gbk_chinese_ci gbk 28 Yes Yes 1", - "latin1_bin latin1 47 Yes Yes 1", - "utf8_bin utf8 83 Yes Yes 1", - "utf8_general_ci utf8 33 Yes 1", - "utf8_unicode_ci utf8 192 Yes 1", - "utf8mb4_0900_ai_ci utf8mb4 255 Yes 1", - "utf8mb4_0900_bin utf8mb4 309 Yes 1", - "utf8mb4_bin utf8mb4 46 Yes Yes 1", - "utf8mb4_general_ci utf8mb4 45 Yes 1", - "utf8mb4_unicode_ci utf8mb4 224 Yes 1", - ) - tk.MustQuery("show collation").Check(expectRows) - tk.MustQuery("select * from information_schema.COLLATIONS").Check(expectRows) - tk.MustQuery("show character set like '%utf8mb4%'").Check(testkit.Rows("utf8mb4 UTF-8 Unicode utf8mb4_bin 4")) - tk.MustQuery("select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'").Check(testkit.Rows("utf8mb4_bin utf8mb4 46 Yes Yes 1")) - // update default_collation_for_utf8mb4 - tk.MustExec("set @@session.default_collation_for_utf8mb4='utf8mb4_0900_ai_ci';") - tk.MustQuery("show variables like 'default_collation_for_utf8mb4';").Check(testkit.Rows("default_collation_for_utf8mb4 utf8mb4_0900_ai_ci")) - expectRows1 := testkit.Rows( - "ascii_bin ascii 65 Yes Yes 1", - "binary binary 63 Yes Yes 1", - "gbk_bin gbk 87 Yes 1", - "gbk_chinese_ci gbk 28 Yes Yes 1", - "latin1_bin latin1 47 Yes Yes 1", - "utf8_bin utf8 83 Yes Yes 1", - "utf8_general_ci utf8 33 Yes 1", - "utf8_unicode_ci utf8 192 Yes 1", - "utf8mb4_0900_ai_ci utf8mb4 255 Yes Yes 1", - "utf8mb4_0900_bin utf8mb4 309 Yes 1", - "utf8mb4_bin utf8mb4 46 Yes 1", - "utf8mb4_general_ci utf8mb4 45 Yes 1", - "utf8mb4_unicode_ci utf8mb4 224 Yes 1", - ) - tk.MustQuery("show collation").Check(expectRows1) - tk.MustQuery("select * from information_schema.COLLATIONS").Check(expectRows) - tk.MustQuery("show character set like '%utf8mb4%'").Check(testkit.Rows("utf8mb4 UTF-8 Unicode utf8mb4_0900_ai_ci 4")) - tk.MustQuery("select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'").Check(testkit.Rows("utf8mb4_bin utf8mb4 46 Yes Yes 1")) -} - -func TestForbidUnsupportedCollations(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - mustGetUnsupportedCollation := func(sql string, coll string) { - tk.MustGetErrMsg(sql, fmt.Sprintf("[ddl:1273]Unsupported collation when new collation is enabled: '%s'", coll)) - } - - mustGetUnsupportedCollation("select 'a' collate utf8_roman_ci", "utf8_roman_ci") - mustGetUnsupportedCollation("select cast('a' as char) collate utf8_roman_ci", "utf8_roman_ci") - 
mustGetUnsupportedCollation("set names utf8 collate utf8_roman_ci", "utf8_roman_ci") - mustGetUnsupportedCollation("set session collation_server = 'utf8_roman_ci'", "utf8_roman_ci") - mustGetUnsupportedCollation("set session collation_database = 'utf8_roman_ci'", "utf8_roman_ci") - mustGetUnsupportedCollation("set session collation_connection = 'utf8_roman_ci'", "utf8_roman_ci") - mustGetUnsupportedCollation("set global collation_server = 'utf8_roman_ci'", "utf8_roman_ci") - mustGetUnsupportedCollation("set global collation_database = 'utf8_roman_ci'", "utf8_roman_ci") - mustGetUnsupportedCollation("set global collation_connection = 'utf8_roman_ci'", "utf8_roman_ci") -} - func TestAutoIncIDInRetry(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1463,33 +1397,6 @@ func TestAutoRandRecoverTable(t *testing.T) { require.Equal(t, []int64{1, 2, 3, autoRandIDStep + 1, autoRandIDStep + 2, autoRandIDStep + 3}, ordered) } -func TestMaxDeltaSchemaCount(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - require.Equal(t, int64(variable.DefTiDBMaxDeltaSchemaCount), variable.GetMaxDeltaSchemaCount()) - - tk.MustExec("set @@global.tidb_max_delta_schema_count= -1") - tk.MustQuery("show warnings;").Check(testkit.Rows("Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '-1'")) - // Make sure a new session will load global variables. - tk.RefreshSession() - tk.MustExec("use test") - require.Equal(t, int64(100), variable.GetMaxDeltaSchemaCount()) - tk.MustExec(fmt.Sprintf("set @@global.tidb_max_delta_schema_count= %v", uint64(math.MaxInt64))) - tk.MustQuery("show warnings;").Check(testkit.Rows(fmt.Sprintf("Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '%d'", uint64(math.MaxInt64)))) - tk.RefreshSession() - tk.MustExec("use test") - require.Equal(t, int64(16384), variable.GetMaxDeltaSchemaCount()) - tk.MustGetErrCode("set @@global.tidb_max_delta_schema_count= invalid_val", errno.ErrWrongTypeForVar) - - tk.MustExec("set @@global.tidb_max_delta_schema_count= 2048") - tk.RefreshSession() - tk.MustExec("use test") - require.Equal(t, int64(2048), variable.GetMaxDeltaSchemaCount()) - tk.MustQuery("select @@global.tidb_max_delta_schema_count").Check(testkit.Rows("2048")) -} - func TestOOMPanicInHashJoinWhenFetchBuildRows(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1556,30 +1463,6 @@ func TestIssue18744(t *testing.T) { require.EqualError(t, err, "mockIndexHashJoinOuterWorkerErr") } -func TestIssue19410(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t, t1, t2, t3;") - tk.MustExec("create table t(a int, b enum('A', 'B'));") - tk.MustExec("create table t1(a1 int, b1 enum('B', 'A') NOT NULL, UNIQUE KEY (b1));") - tk.MustExec("insert into t values (1, 'A');") - tk.MustExec("insert into t1 values (1, 'A');") - tk.MustQuery("select /*+ INL_HASH_JOIN(t1) */ * from t join t1 on t.b = t1.b1;").Check(testkit.Rows("1 A 1 A")) - tk.MustQuery("select /*+ INL_JOIN(t1) */ * from t join t1 on t.b = t1.b1;").Check(testkit.Rows("1 A 1 A")) - - tk.MustExec("create table t2(a1 int, b1 enum('C', 'D') NOT NULL, UNIQUE KEY (b1));") - tk.MustExec("insert into t2 values (1, 'C');") - tk.MustQuery("select /*+ INL_HASH_JOIN(t2) */ * from t join t2 on t.b = t2.b1;").Check(testkit.Rows()) - tk.MustQuery("select /*+ INL_JOIN(t2) */ * from t join t2 on t.b = t2.b1;").Check(testkit.Rows()) - - tk.MustExec("create table 
t3(a1 int, b1 enum('A', 'B') NOT NULL, UNIQUE KEY (b1));") - tk.MustExec("insert into t3 values (1, 'A');") - tk.MustQuery("select /*+ INL_HASH_JOIN(t3) */ * from t join t3 on t.b = t3.b1;").Check(testkit.Rows("1 A 1 A")) - tk.MustQuery("select /*+ INL_JOIN(t3) */ * from t join t3 on t.b = t3.b1;").Check(testkit.Rows("1 A 1 A")) -} - func TestAnalyzeNextRawErrorNoLeak(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/pkg/executor/test/showtest/BUILD.bazel b/pkg/executor/test/showtest/BUILD.bazel index 9c1a33744aa29..05eafb29e98fd 100644 --- a/pkg/executor/test/showtest/BUILD.bazel +++ b/pkg/executor/test/showtest/BUILD.bazel @@ -8,7 +8,7 @@ go_test( "show_test.go", ], flaky = True, - shard_count = 45, + shard_count = 27, deps = [ "//pkg/autoid_service", "//pkg/config", diff --git a/pkg/executor/test/showtest/show_test.go b/pkg/executor/test/showtest/show_test.go index ccbd562a35bf1..0dbdd57f6ad98 100644 --- a/pkg/executor/test/showtest/show_test.go +++ b/pkg/executor/test/showtest/show_test.go @@ -39,471 +39,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestShowHistogramsInFlight(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - result := tk.MustQuery("show histograms_in_flight") - rows := result.Rows() - require.Len(t, rows, 1) - require.Equal(t, rows[0][0], "0") -} - -func TestShowOpenTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("show open tables") - tk.MustQuery("show open tables in test") -} - -func TestShowCreateViewDefiner(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%", AuthUsername: "root", AuthHostname: "%"}, nil, nil, nil)) - - tk.MustExec("use test") - tk.MustExec("create or replace view v1 as select 1") - tk.MustQuery("show create view v1").Check(testkit.RowsWithSep("|", "v1|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v1` (`1`) AS SELECT 1 AS `1`|utf8mb4|utf8mb4_bin")) - tk.MustExec("drop view v1") -} - -func TestShowCreateTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("use test") - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(a int,b int)") - tk.MustExec("drop view if exists v1") - tk.MustExec("create or replace definer=`root`@`127.0.0.1` view v1 as select * from t1") - tk.MustQuery("show create table v1").Check(testkit.RowsWithSep("|", "v1|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a`, `b`) AS SELECT `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` FROM `test`.`t1`|utf8mb4|utf8mb4_bin")) - tk.MustQuery("show create view v1").Check(testkit.RowsWithSep("|", "v1|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a`, `b`) AS SELECT `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` FROM `test`.`t1`|utf8mb4|utf8mb4_bin")) - tk.MustExec("drop view v1") - tk.MustExec("drop table t1") - - tk.MustExec("drop view if exists v") - tk.MustExec("create or replace definer=`root`@`127.0.0.1` view v as select JSON_MERGE('{}', '{}') as col;") - tk.MustQuery("show create view v").Check(testkit.RowsWithSep("|", "v|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v` (`col`) AS SELECT JSON_MERGE(_UTF8MB4'{}', _UTF8MB4'{}') AS `col`|utf8mb4|utf8mb4_bin")) - tk.MustExec("drop view if 
exists v") - - tk.MustExec("drop table if exists t1") - tk.MustExec("create table t1(a int,b int)") - tk.MustExec("create or replace definer=`root`@`127.0.0.1` view v1 as select avg(a),t1.* from t1 group by a") - tk.MustQuery("show create view v1").Check(testkit.RowsWithSep("|", "v1|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`avg(a)`, `a`, `b`) AS SELECT AVG(`a`) AS `avg(a)`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` FROM `test`.`t1` GROUP BY `a`|utf8mb4|utf8mb4_bin")) - tk.MustExec("drop view v1") - tk.MustExec("create or replace definer=`root`@`127.0.0.1` view v1 as select a+b, t1.* , a as c from t1") - tk.MustQuery("show create view v1").Check(testkit.RowsWithSep("|", "v1|CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a+b`, `a`, `b`, `c`) AS SELECT `a`+`b` AS `a+b`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`a` AS `c` FROM `test`.`t1`|utf8mb4|utf8mb4_bin")) - tk.MustExec("drop table t1") - tk.MustExec("drop view v1") - - // For issue #9211 - tk.MustExec("create table t(c int, b int as (c + 1))ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") - tk.MustQuery("show create table `t`").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `c` int(11) DEFAULT NULL,\n"+ - " `b` int(11) GENERATED ALWAYS AS (`c` + 1) VIRTUAL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - tk.MustExec("drop table t") - tk.MustExec("create table t(c int, b int as (c + 1) not null)ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") - tk.MustQuery("show create table `t`").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `c` int(11) DEFAULT NULL,\n"+ - " `b` int(11) GENERATED ALWAYS AS (`c` + 1) VIRTUAL NOT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("drop table t") - tk.MustExec("create table t ( a char(10) charset utf8 collate utf8_bin, b char(10) as (rtrim(a)));") - tk.MustQuery("show create table `t`").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` char(10) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,\n"+ - " `b` char(10) GENERATED ALWAYS AS (rtrim(`a`)) VIRTUAL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("drop table t") - - tk.MustExec(`drop table if exists different_charset`) - tk.MustExec(`create table different_charset(ch1 varchar(10) charset utf8, ch2 varchar(10) charset binary);`) - tk.MustQuery(`show create table different_charset`).Check(testkit.RowsWithSep("|", - ""+ - "different_charset CREATE TABLE `different_charset` (\n"+ - " `ch1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL,\n"+ - " `ch2` varbinary(10) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table `t` (\n" + - "`a` timestamp not null default current_timestamp,\n" + - "`b` timestamp(3) default current_timestamp(3),\n" + - "`c` datetime default current_timestamp,\n" + - "`d` datetime(4) default current_timestamp(4),\n" + - "`e` varchar(20) default 'cUrrent_tImestamp',\n" + - "`f` datetime(2) default current_timestamp(2) on update current_timestamp(2),\n" + - "`g` timestamp(2) default current_timestamp(2) on update current_timestamp(2),\n" + - "`h` date default current_date )") - tk.MustQuery("show create table `t`").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` timestamp NOT NULL DEFAULT 
CURRENT_TIMESTAMP,\n"+ - " `b` timestamp(3) DEFAULT CURRENT_TIMESTAMP(3),\n"+ - " `c` datetime DEFAULT CURRENT_TIMESTAMP,\n"+ - " `d` datetime(4) DEFAULT CURRENT_TIMESTAMP(4),\n"+ - " `e` varchar(20) DEFAULT 'cUrrent_tImestamp',\n"+ - " `f` datetime(2) DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE CURRENT_TIMESTAMP(2),\n"+ - " `g` timestamp(2) DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE CURRENT_TIMESTAMP(2),\n"+ - " `h` date DEFAULT CURRENT_DATE\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("drop table t") - - tk.MustExec("create table t (a int, b int) shard_row_id_bits = 4 pre_split_regions=3;") - tk.MustQuery("show create table `t`").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` int(11) DEFAULT NULL,\n"+ - " `b` int(11) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T! SHARD_ROW_ID_BITS=4 PRE_SPLIT_REGIONS=3 */", - )) - tk.MustExec("drop table t") - - // for issue #20446 - tk.MustExec("drop table if exists t1;") - tk.MustExec("create table t1(c int unsigned default 0);") - tk.MustQuery("show create table `t1`").Check(testkit.RowsWithSep("|", - ""+ - "t1 CREATE TABLE `t1` (\n"+ - " `c` int(10) unsigned DEFAULT '0'\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("drop table t1") - - tk.MustExec("CREATE TABLE `log` (" + - "`LOG_ID` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT," + - "`ROUND_ID` bigint(20) UNSIGNED NOT NULL," + - "`USER_ID` int(10) UNSIGNED NOT NULL," + - "`USER_IP` int(10) UNSIGNED DEFAULT NULL," + - "`END_TIME` datetime NOT NULL," + - "`USER_TYPE` int(11) DEFAULT NULL," + - "`APP_ID` int(11) DEFAULT NULL," + - "PRIMARY KEY (`LOG_ID`,`END_TIME`) NONCLUSTERED," + - "KEY `IDX_EndTime` (`END_TIME`)," + - "KEY `IDX_RoundId` (`ROUND_ID`)," + - "KEY `IDX_UserId_EndTime` (`USER_ID`,`END_TIME`)" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=505488 " + - "PARTITION BY RANGE ( month(`end_time`) ) (" + - "PARTITION `p1` VALUES LESS THAN (2)," + - "PARTITION `p2` VALUES LESS THAN (3)," + - "PARTITION `p3` VALUES LESS THAN (4)," + - "PARTITION `p4` VALUES LESS THAN (5)," + - "PARTITION `p5` VALUES LESS THAN (6)," + - "PARTITION `p6` VALUES LESS THAN (7)," + - "PARTITION `p7` VALUES LESS THAN (8)," + - "PARTITION `p8` VALUES LESS THAN (9)," + - "PARTITION `p9` VALUES LESS THAN (10)," + - "PARTITION `p10` VALUES LESS THAN (11)," + - "PARTITION `p11` VALUES LESS THAN (12)," + - "PARTITION `p12` VALUES LESS THAN (MAXVALUE))") - tk.MustQuery("show create table log").Check(testkit.RowsWithSep("|", - "log CREATE TABLE `log` (\n"+ - " `LOG_ID` bigint(20) unsigned NOT NULL AUTO_INCREMENT,\n"+ - " `ROUND_ID` bigint(20) unsigned NOT NULL,\n"+ - " `USER_ID` int(10) unsigned NOT NULL,\n"+ - " `USER_IP` int(10) unsigned DEFAULT NULL,\n"+ - " `END_TIME` datetime NOT NULL,\n"+ - " `USER_TYPE` int(11) DEFAULT NULL,\n"+ - " `APP_ID` int(11) DEFAULT NULL,\n"+ - " PRIMARY KEY (`LOG_ID`,`END_TIME`) /*T![clustered_index] NONCLUSTERED */,\n"+ - " KEY `IDX_EndTime` (`END_TIME`),\n"+ - " KEY `IDX_RoundId` (`ROUND_ID`),\n"+ - " KEY `IDX_UserId_EndTime` (`USER_ID`,`END_TIME`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=505488\n"+ - "PARTITION BY RANGE (MONTH(`end_time`))\n"+ - "(PARTITION `p1` VALUES LESS THAN (2),\n"+ - " PARTITION `p2` VALUES LESS THAN (3),\n"+ - " PARTITION `p3` VALUES LESS THAN (4),\n"+ - " PARTITION `p4` VALUES LESS THAN (5),\n"+ - " PARTITION `p5` VALUES LESS THAN (6),\n"+ - " PARTITION `p6` VALUES LESS 
THAN (7),\n"+ - " PARTITION `p7` VALUES LESS THAN (8),\n"+ - " PARTITION `p8` VALUES LESS THAN (9),\n"+ - " PARTITION `p9` VALUES LESS THAN (10),\n"+ - " PARTITION `p10` VALUES LESS THAN (11),\n"+ - " PARTITION `p11` VALUES LESS THAN (12),\n"+ - " PARTITION `p12` VALUES LESS THAN (MAXVALUE))")) - - // for issue #11831 - tk.MustExec("create table ttt4(a varchar(123) default null collate utf8mb4_unicode_ci)engine=innodb default charset=utf8mb4 collate=utf8mb4_unicode_ci;") - tk.MustQuery("show create table `ttt4`").Check(testkit.RowsWithSep("|", - ""+ - "ttt4 CREATE TABLE `ttt4` (\n"+ - " `a` varchar(123) COLLATE utf8mb4_unicode_ci DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci", - )) - tk.MustExec("create table ttt5(a varchar(123) default null)engine=innodb default charset=utf8mb4 collate=utf8mb4_bin;") - tk.MustQuery("show create table `ttt5`").Check(testkit.RowsWithSep("|", - ""+ - "ttt5 CREATE TABLE `ttt5` (\n"+ - " `a` varchar(123) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - // for expression index - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b real);") - tk.MustExec("alter table t add index expr_idx((a*b+1));") - tk.MustQuery("show create table t;").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` int(11) DEFAULT NULL,\n"+ - " `b` double DEFAULT NULL,\n"+ - " KEY `expr_idx` ((`a` * `b` + 1))\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - // Fix issue #15175, show create table sequence_name. - tk.MustExec("drop sequence if exists seq") - tk.MustExec("create sequence seq") - tk.MustQuery("show create table seq;").Check(testkit.Rows("seq CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB")) - - // Test for issue #15633, 'binary' collation should be ignored in the result of 'show create table'. - tk.MustExec(`drop table if exists binary_collate`) - tk.MustExec(`create table binary_collate(a varchar(10)) default collate=binary;`) - tk.MustQuery(`show create table binary_collate`).Check(testkit.RowsWithSep("|", - ""+ - "binary_collate CREATE TABLE `binary_collate` (\n"+ - " `a` varbinary(10) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=binary", // binary collate is ignored - )) - tk.MustExec(`drop table if exists binary_collate`) - tk.MustExec(`create table binary_collate(a varchar(10)) default charset=binary collate=binary;`) - tk.MustQuery(`show create table binary_collate`).Check(testkit.RowsWithSep("|", - ""+ - "binary_collate CREATE TABLE `binary_collate` (\n"+ - " `a` varbinary(10) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=binary", // binary collate is ignored - )) - tk.MustExec(`drop table if exists binary_collate`) - tk.MustExec(`create table binary_collate(a varchar(10)) default charset=utf8mb4 collate=utf8mb4_bin;`) - tk.MustQuery(`show create table binary_collate`).Check(testkit.RowsWithSep("|", - ""+ - "binary_collate CREATE TABLE `binary_collate` (\n"+ - " `a` varchar(10) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", // non-binary collate is kept. - )) - // Test for issue #17 in bug competition, default num and sequence should be shown without quote. 
- tk.MustExec(`drop table if exists default_num`) - tk.MustExec("create table default_num(a int default 11)") - tk.MustQuery("show create table default_num").Check(testkit.RowsWithSep("|", - ""+ - "default_num CREATE TABLE `default_num` (\n"+ - " `a` int(11) DEFAULT '11'\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec(`drop table if exists default_varchar`) - tk.MustExec("create table default_varchar(a varchar(10) default \"haha\")") - tk.MustQuery("show create table default_varchar").Check(testkit.RowsWithSep("|", - ""+ - "default_varchar CREATE TABLE `default_varchar` (\n"+ - " `a` varchar(10) DEFAULT 'haha'\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec(`drop table if exists default_sequence`) - tk.MustExec("create table default_sequence(a int default nextval(seq))") - tk.MustQuery("show create table default_sequence").Check(testkit.RowsWithSep("|", - ""+ - "default_sequence CREATE TABLE `default_sequence` (\n"+ - " `a` int(11) DEFAULT nextval(`test`.`seq`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - // set @@foreign_key_checks=0, - // This means that the child table can be created before the parent table. - // This behavior is required for mysqldump restores. - tk.MustExec("set @@foreign_key_checks=0") - tk.MustExec(`DROP TABLE IF EXISTS parent, child`) - tk.MustExec(`CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id))`) - tk.MustExec(`CREATE TABLE parent ( id INT NOT NULL PRIMARY KEY auto_increment )`) - tk.MustQuery(`show create table child`).Check(testkit.RowsWithSep("|", - ""+ - "child CREATE TABLE `child` (\n"+ - " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ - " `parent_id` int(11) NOT NULL,\n"+ - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n"+ - " KEY `par_ind` (`parent_id`),\n"+ - " CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `test`.`parent` (`id`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - // Test Foreign keys + ON DELETE / ON UPDATE - tk.MustExec(`DROP TABLE child`) - tk.MustExec(`CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE RESTRICT ON UPDATE CASCADE)`) - tk.MustQuery(`show create table child`).Check(testkit.RowsWithSep("|", - ""+ - "child CREATE TABLE `child` (\n"+ - " `id` int(11) NOT NULL AUTO_INCREMENT,\n"+ - " `parent_id` int(11) NOT NULL,\n"+ - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n"+ - " KEY `par_ind` (`parent_id`),\n"+ - " CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `test`.`parent` (`id`) ON DELETE RESTRICT ON UPDATE CASCADE\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - - // Test Foreign key refer other database table. 
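Before the cross-database foreign key case below, one note on a pattern that runs through these SHOW CREATE TABLE checks: the printed DDL must round-trip, that is, it has to be valid input for CREATE TABLE (the temporary-table test later in this diff re-executes its own output the same way). A sketch of that check as a hypothetical helper, assuming a testkit session; it is not a helper defined in this diff:

// checkShowCreateRoundTrip re-executes the DDL printed by SHOW CREATE TABLE
// and fails the test if the output does not parse. Hypothetical helper.
func checkShowCreateRoundTrip(tk *testkit.TestKit, table string) {
	createSQL := tk.MustQuery("show create table `" + table + "`").Rows()[0][1].(string)
	tk.MustExec("drop table `" + table + "`")
	tk.MustExec(createSQL)
}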
- tk.MustExec("create database test1") - tk.MustExec("create database test2") - tk.MustExec("create table test1.t1 (id int key, b int, index(b));") - tk.MustExec("create table test2.t2 (id int key, b int, foreign key fk(b) references test1.t1(id));") - tk.MustQuery("show create table test2.t2").Check(testkit.Rows("t2 CREATE TABLE `t2` (\n" + - " `id` int(11) NOT NULL,\n" + - " `b` int(11) DEFAULT NULL,\n" + - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n" + - " KEY `fk` (`b`),\n" + - " CONSTRAINT `fk` FOREIGN KEY (`b`) REFERENCES `test1`.`t1` (`id`)\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - - // Test issue #20327 - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b char(10) as ('a'));") - result := tk.MustQuery("show create table t;").Rows()[0][1] - require.Regexp(t, `(?s).*GENERATED ALWAYS AS \(_utf8mb4'a'\).*`, result) - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int, b char(10) as (_utf8'a'));") - result = tk.MustQuery("show create table t;").Rows()[0][1] - require.Regexp(t, `(?s).*GENERATED ALWAYS AS \(_utf8'a'\).*`, result) - // Test show list partition table - tk.MustExec("set @@session.tidb_enable_list_partition = ON") - tk.MustExec(`DROP TABLE IF EXISTS t`) - tk.MustExec(`create table t (id int, name varchar(10), unique index idx (id)) partition by list (id) ( - partition p0 values in (3,5,6,9,17), - partition p1 values in (1,2,10,11,19,20), - partition p2 values in (4,12,13,14,18), - partition p3 values in (7,8,15,16,null) - );`) - tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", - "t CREATE TABLE `t` (\n"+ - " `id` int(11) DEFAULT NULL,\n"+ - " `name` varchar(10) DEFAULT NULL,\n"+ - " UNIQUE KEY `idx` (`id`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ - "PARTITION BY LIST (`id`)\n"+ - "(PARTITION `p0` VALUES IN (3,5,6,9,17),\n"+ - " PARTITION `p1` VALUES IN (1,2,10,11,19,20),\n"+ - " PARTITION `p2` VALUES IN (4,12,13,14,18),\n"+ - " PARTITION `p3` VALUES IN (7,8,15,16,NULL))")) - // Test show list column partition table - tk.MustExec(`DROP TABLE IF EXISTS t`) - tk.MustExec(`create table t (id int, name varchar(10), unique index idx (id)) partition by list columns (id) ( - partition p0 values in (3,5,6,9,17), - partition p1 values in (1,2,10,11,19,20), - partition p2 values in (4,12,13,14,18), - partition p3 values in (7,8,15,16,null) - );`) - tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", - "t CREATE TABLE `t` (\n"+ - " `id` int(11) DEFAULT NULL,\n"+ - " `name` varchar(10) DEFAULT NULL,\n"+ - " UNIQUE KEY `idx` (`id`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ - "PARTITION BY LIST COLUMNS(`id`)\n"+ - "(PARTITION `p0` VALUES IN (3,5,6,9,17),\n"+ - " PARTITION `p1` VALUES IN (1,2,10,11,19,20),\n"+ - " PARTITION `p2` VALUES IN (4,12,13,14,18),\n"+ - " PARTITION `p3` VALUES IN (7,8,15,16,NULL))")) - tk.MustExec(`DROP TABLE IF EXISTS t`) - tk.MustExec(`create table t (id int, name varchar(10), unique index idx (id, name)) partition by list columns (id, name) ( - partition p0 values in ((3, '1'), (5, '5')), - partition p1 values in ((1, '1')));`) - // The strings are single quoted in MySQL even if sql_mode doesn't contain ANSI_QUOTES. 
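A small sketch of the quoting rule the next assertion applies to LIST COLUMNS tuples: numbers are rendered bare, strings are single-quoted with '' as the escape. Illustrative only, not TiDB's actual formatter, and it assumes fmt and strings are imported:

// formatPartitionTuple renders one VALUES IN tuple,
// e.g. formatPartitionTuple(3, "1") returns "(3,'1')".
func formatPartitionTuple(vals ...any) string {
	parts := make([]string, 0, len(vals))
	for _, v := range vals {
		switch x := v.(type) {
		case string:
			// Single-quote strings and double any embedded quote, as in 'foo''bar'.
			parts = append(parts, "'"+strings.ReplaceAll(x, "'", "''")+"'")
		default:
			parts = append(parts, fmt.Sprint(x))
		}
	}
	return "(" + strings.Join(parts, ",") + ")"
}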
- tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", - "t CREATE TABLE `t` (\n"+ - " `id` int(11) DEFAULT NULL,\n"+ - " `name` varchar(10) DEFAULT NULL,\n"+ - " UNIQUE KEY `idx` (`id`,`name`)\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ - "PARTITION BY LIST COLUMNS(`id`,`name`)\n"+ - "(PARTITION `p0` VALUES IN ((3,'1'),(5,'5')),\n"+ - " PARTITION `p1` VALUES IN ((1,'1')))")) - tk.MustExec(`DROP TABLE IF EXISTS t`) - tk.MustExec(`create table t (id int primary key, v varchar(255) not null, key idx_v (v) comment 'foo\'bar')`) - tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", - "t CREATE TABLE `t` (\n"+ - " `id` int(11) NOT NULL,\n"+ - " `v` varchar(255) NOT NULL,\n"+ - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n"+ - " KEY `idx_v` (`v`) COMMENT 'foo''bar'\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - - // For issue #29922 - tk.MustExec("CREATE TABLE `thash` (\n `id` bigint unsigned NOT NULL,\n `data` varchar(255) DEFAULT NULL,\n PRIMARY KEY (`id`)\n)\nPARTITION BY HASH (`id`)\n(PARTITION pEven COMMENT = \"Even ids\",\n PARTITION pOdd COMMENT = \"Odd ids\");") - tk.MustQuery("show create table `thash`").Check(testkit.RowsWithSep("|", ""+ - "thash CREATE TABLE `thash` (\n"+ - " `id` bigint(20) unsigned NOT NULL,\n"+ - " `data` varchar(255) DEFAULT NULL,\n"+ - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ - "PARTITION BY HASH (`id`)\n"+ - "(PARTITION `pEven` COMMENT 'Even ids',\n"+ - " PARTITION `pOdd` COMMENT 'Odd ids')", - )) - // empty edge case - tk.MustExec("drop table if exists `thash`") - tk.MustExec("CREATE TABLE `thash` (\n `id` bigint unsigned NOT NULL,\n `data` varchar(255) DEFAULT NULL,\n PRIMARY KEY (`id`)\n)\nPARTITION BY HASH (`id`);") - tk.MustQuery("show create table `thash`").Check(testkit.RowsWithSep("|", ""+ - "thash CREATE TABLE `thash` (\n"+ - " `id` bigint(20) unsigned NOT NULL,\n"+ - " `data` varchar(255) DEFAULT NULL,\n"+ - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n"+ - "PARTITION BY HASH (`id`) PARTITIONS 1", - )) - - // default value escape character '\\' display case - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(a int primary key, b varchar(20) default '\\\\');") - tk.MustQuery("show create table t;").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` int(11) NOT NULL,\n"+ - " `b` varchar(20) DEFAULT '\\\\',\n"+ - " PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t(" + - "a set('a', 'b') charset binary," + - "b enum('a', 'b') charset ascii);") - tk.MustQuery("show create table t;").Check(testkit.RowsWithSep("|", - ""+ - "t CREATE TABLE `t` (\n"+ - " `a` set('a','b') CHARACTER SET binary COLLATE binary DEFAULT NULL,\n"+ - " `b` enum('a','b') CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t(a bit default (rand()))`) - tk.MustQuery(`show create table t`).Check(testkit.RowsWithSep("|", ""+ - "t CREATE TABLE `t` (\n"+ - " `a` bit(1) DEFAULT rand()\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - - tk.MustExec(`drop table if exists t`) - err := 
tk.ExecToErr(`create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0xff))`) - require.ErrorContains(t, err, "[ddl:1654]Partition column values of incorrect type") - tk.MustExec(`create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0x7f))`) - tk.MustQuery(`show create table t`).Check(testkit.Rows( - "t CREATE TABLE `t` (\n" + - " `a` varchar(255) CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin\n" + - "PARTITION BY RANGE COLUMNS(`a`)\n" + - "(PARTITION `p` VALUES LESS THAN (x'7f'))")) -} - func TestShowCreateTablePlacement(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -706,26 +241,6 @@ func TestShowVisibility(t *testing.T) { tk.MustExec("drop database showdatabase") } -func TestShowDatabasesInfoSchemaFirst(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustQuery("show databases").Check(testkit.Rows("INFORMATION_SCHEMA")) - tk.MustExec(`create user 'show'@'%'`) - - tk.MustExec(`create database AAAA`) - tk.MustExec(`create database BBBB`) - tk.MustExec(`grant select on AAAA.* to 'show'@'%'`) - tk.MustExec(`grant select on BBBB.* to 'show'@'%'`) - - tk1 := testkit.NewTestKit(t, store) - require.NoError(t, tk1.Session().Auth(&auth.UserIdentity{Username: "show", Hostname: "%"}, nil, nil, nil)) - tk1.MustQuery("show databases").Check(testkit.Rows("INFORMATION_SCHEMA", "AAAA", "BBBB")) - - tk.MustExec(`drop user 'show'@'%'`) - tk.MustExec(`drop database AAAA`) - tk.MustExec(`drop database BBBB`) -} - func TestShowWarnings(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -760,24 +275,6 @@ func TestShowWarnings(t *testing.T) { tk.MustQuery("select @@warning_count").Check(testkit.RowsWithSep("|", "0")) } -func TestShowErrors(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - testSQL := `create table if not exists show_errors (a int)` - tk.MustExec(testSQL) - testSQL = `create table show_errors (a int)` - // FIXME: 'test.show_errors' already exists - _, _ = tk.Exec(testSQL) - - tk.MustQuery("show errors").Check(testkit.RowsWithSep("|", "Error|1050|Table 'test.show_errors' already exists")) - - // eliminate previous errors - tk.MustExec("select 1") - _, _ = tk.Exec("create invalid") - tk.MustQuery("show errors").Check(testkit.RowsWithSep("|", "Error|1064|You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 14 near \"invalid\" ")) -} - func TestShowWarningsForExprPushdown(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) @@ -898,13 +395,6 @@ func TestIssue17794(t *testing.T) { tk1.MustQuery("show grants").Check(testkit.Rows("GRANT USAGE ON *.* TO 'root'@'8.8.%'")) } -func TestIssue3641(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustGetErrCode("show tables;", mysql.ErrNoDB) - tk.MustGetErrCode("show tables;", mysql.ErrNoDB) -} - func TestIssue10549(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1277,51 +767,6 @@ func TestShowTableStatus(t *testing.T) { tk.MustExec("drop database UPPER_CASE") } -func TestShowSlow(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := 
testkit.NewTestKit(t, store) - // The test result is volatile, because - // 1. Slow queries are stored in the domain, which may be affected by other tests. - // 2. Collecting slow queries is an asynchronous process, so checking immediately may not get the expected result. - // 3. Making a slow query like "select sleep(1)" would slow down the CI. - // So, we just cover the code but do not check the result. - tk.MustQuery(`admin show slow recent 3`) - tk.MustQuery(`admin show slow top 3`) - tk.MustQuery(`admin show slow top internal 3`) - tk.MustQuery(`admin show slow top all 3`) -} - -func TestShowCreateStmtIgnoreLocalTemporaryTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - - // SHOW CREATE VIEW ignores local temporary table with the same name - tk.MustExec("drop view if exists v1") - tk.MustExec("create view v1 as select 1") - tk.MustExec("create temporary table v1 (a int)") - tk.MustQuery("show create table v1").Check(testkit.RowsWithSep("|", - ""+ - "v1 CREATE TEMPORARY TABLE `v1` (\n"+ - " `a` int(11) DEFAULT NULL\n"+ - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin", - )) - tk.MustExec("drop view v1") - err := tk.ExecToErr("show create view v1") - require.True(t, infoschema.ErrTableNotExists.Equal(err)) - - // SHOW CREATE SEQUENCE ignores local temporary table with the same name - tk.MustExec("drop view if exists seq1") - tk.MustExec("create sequence seq1") - tk.MustExec("create temporary table seq1 (a int)") - tk.MustQuery("show create sequence seq1").Check(testkit.RowsWithSep("|", - "seq1 CREATE SEQUENCE `seq1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB", - )) - tk.MustExec("drop sequence seq1") - err = tk.ExecToErr("show create sequence seq1") - require.True(t, infoschema.ErrTableNotExists.Equal(err)) -} - func TestAutoRandomBase(t *testing.T) { require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/meta/autoid/mockAutoIDChange", `return(true)`)) defer func() { @@ -1406,18 +851,6 @@ func TestShowEscape(t *testing.T) { tk.MustExec("set sql_mode=@old_sql_mode") } -func TestShowBuiltin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - res := tk.MustQuery("show builtins;") - require.NotNil(t, res) - rows := res.Rows() - const builtinFuncNum = 291 - require.Equal(t, builtinFuncNum, len(rows)) - require.Equal(t, rows[0][0].(string), "abs") - require.Equal(t, rows[builtinFuncNum-1][0].(string), "yearweek") -} - func TestShowClusterConfig(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1453,25 +886,21 @@ func TestShowClusterConfig(t *testing.T) { require.EqualError(t, tk.QueryToErr("show config"), confErr.Error()) } -func TestInvisibleCoprCacheConfig(t *testing.T) { +func TestShowConfig(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) rows := tk.MustQuery("show variables like '%config%'").Rows() require.Equal(t, 1, len(rows)) configValue := rows[0][1].(string) + + // Test copr-cache coprCacheVal := "\t\t\"copr-cache\": {\n" + "\t\t\t\"capacity-mb\": 1000\n" + "\t\t},\n" require.Equal(t, true, strings.Contains(configValue, coprCacheVal)) -} -func TestEnableGlobalKillConfig(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - rows := tk.MustQuery("show variables like '%config%'").Rows() - require.Equal(t, 1, len(rows)) - configValue := rows[0][1].(string) + // Test GlobalKill globalKillVal := 
"\"enable-global-kill\": true" require.True(t, strings.Contains(configValue, globalKillVal)) } @@ -1615,18 +1044,6 @@ func TestShowVar(t *testing.T) { testkit.RowsWithSep("|", "sql_mode|NO_BACKSLASH_ESCAPES")) } -// TestShowPerformanceSchema tests for Issue 19231 -func TestShowPerformanceSchema(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - // Ideally we should create a new performance_schema table here with indices that we run the tests on. - // However, it's not possible to create a new performance_schema table since it's a special in-memory table. - // Instead, the test below uses the default index on the table. - tk.MustQuery("SHOW INDEX FROM performance_schema.events_statements_summary_by_digest").Check( - testkit.Rows("events_statements_summary_by_digest 0 SCHEMA_NAME 1 SCHEMA_NAME A 0 YES BTREE YES NO", - "events_statements_summary_by_digest 0 SCHEMA_NAME 2 DIGEST A 0 YES BTREE YES NO")) -} - func TestShowCreatePlacementPolicy(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1641,89 +1058,6 @@ func TestShowCreatePlacementPolicy(t *testing.T) { tk.MustExec("DROP PLACEMENT POLICY xyz") } -func TestShowTemporaryTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create global temporary table t1 (id int) on commit delete rows") - tk.MustExec("create global temporary table t3 (i int primary key, j int) on commit delete rows") - // For issue https://github.com/pingcap/tidb/issues/24752 - tk.MustQuery("show create table t1").Check(testkit.Rows("t1 CREATE GLOBAL TEMPORARY TABLE `t1` (\n" + - " `id` int(11) DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS")) - // No panic, fix issue https://github.com/pingcap/tidb/issues/24788 - expect := "CREATE GLOBAL TEMPORARY TABLE `t3` (\n" + - " `i` int(11) NOT NULL,\n" + - " `j` int(11) DEFAULT NULL,\n" + - " PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS" - tk.MustQuery("show create table t3").Check(testkit.Rows("t3 " + expect)) - - // Verify that the `show create table` result can be used to build the table. - createTable := strings.ReplaceAll(expect, "t3", "t4") - tk.MustExec(createTable) - - // Cover auto increment column. 
- tk.MustExec(`CREATE GLOBAL TEMPORARY TABLE t5 ( - id int(11) NOT NULL AUTO_INCREMENT, - b int(11) NOT NULL, - pad varbinary(255) DEFAULT NULL, - PRIMARY KEY (id), - KEY b (b)) ON COMMIT DELETE ROWS`) - expect = "CREATE GLOBAL TEMPORARY TABLE `t5` (\n" + - " `id` int(11) NOT NULL AUTO_INCREMENT,\n" + - " `b` int(11) NOT NULL,\n" + - " `pad` varbinary(255) DEFAULT NULL,\n" + - " PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */,\n" + - " KEY `b` (`b`)\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS" - tk.MustQuery("show create table t5").Check(testkit.Rows("t5 " + expect)) - - tk.MustExec("create temporary table t6 (i int primary key, j int)") - expect = "CREATE TEMPORARY TABLE `t6` (\n" + - " `i` int(11) NOT NULL,\n" + - " `j` int(11) DEFAULT NULL,\n" + - " PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin" - tk.MustQuery("show create table t6").Check(testkit.Rows("t6 " + expect)) - tk.MustExec("create temporary table t7 (i int primary key auto_increment, j int)") - defer func() { - tk.MustExec("commit;") - }() - tk.MustExec("begin;") - tk.MustExec("insert into t7 (j) values (14)") - tk.MustExec("insert into t7 (j) values (24)") - tk.MustQuery("select * from t7").Check(testkit.Rows("1 14", "2 24")) - expect = "CREATE TEMPORARY TABLE `t7` (\n" + - " `i` int(11) NOT NULL AUTO_INCREMENT,\n" + - " `j` int(11) DEFAULT NULL,\n" + - " PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=3" - tk.MustQuery("show create table t7").Check(testkit.Rows("t7 " + expect)) -} - -func TestShowCachedTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t1 (id int)") - tk.MustExec("alter table t1 cache") - tk.MustQuery("show create table t1").Check( - testkit.Rows("t1 CREATE TABLE `t1` (\n" + - " `id` int(11) DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /* CACHED ON */")) - tk.MustQuery("select create_options from information_schema.tables where table_schema = 'test' and table_name = 't1'").Check( - testkit.Rows("cached=on")) - - tk.MustExec("alter table t1 nocache") - tk.MustQuery("show create table t1").Check( - testkit.Rows("t1 CREATE TABLE `t1` (\n" + - " `id` int(11) DEFAULT NULL\n" + - ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - tk.MustQuery("select create_options from information_schema.tables where table_schema = 'test' and table_name = 't1'").Check( - testkit.Rows("")) -} - func TestShowBindingCache(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -1807,53 +1141,6 @@ func TestShowBindingCacheStatus(t *testing.T) { "1 1 198 Bytes 250 Bytes")) } -func TestShowDatabasesLike(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{ - Username: "root", Hostname: "%"}, nil, nil, nil)) - - tk.MustExec("DROP DATABASE IF EXISTS `TEST_$1`") - tk.MustExec("DROP DATABASE IF EXISTS `test_$2`") - tk.MustExec("CREATE DATABASE `TEST_$1`;") - tk.MustExec("CREATE DATABASE `test_$2`;") - - tk.MustQuery("SHOW DATABASES LIKE 'TEST_%'").Check(testkit.Rows("TEST_$1", "test_$2")) - tk.MustQuery("SHOW DATABASES LIKE 'test_%'").Check(testkit.Rows("TEST_$1", "test_$2")) -} - -func TestShowTableStatusLike(t *testing.T) { - store := 
testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("DROP table IF EXISTS `T1`") - tk.MustExec("CREATE table `T1` (a int);") - rows := tk.MustQuery("SHOW table status LIKE 't1'").Rows() - require.Equal(t, "T1", rows[0][0]) - - tk.MustExec("DROP table IF EXISTS `Li_1`") - tk.MustExec("DROP table IF EXISTS `li_2`") - - tk.MustExec("CREATE table `Li_1` (a int);") - tk.MustExec("CREATE table `li_2` (a int);") - - rows = tk.MustQuery("SHOW table status LIKE 'li%'").Rows() - require.Equal(t, "Li_1", rows[0][0]) - require.Equal(t, "li_2", rows[1][0]) -} - -func TestShowCollationsLike(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{ - Username: "root", Hostname: "%"}, nil, nil, nil)) - tk.MustQuery("SHOW COLLATION LIKE 'UTF8MB4_BI%'").Check(testkit.Rows("utf8mb4_bin utf8mb4 46 Yes Yes 1")) - tk.MustQuery("SHOW COLLATION LIKE 'utf8mb4_bi%'").Check(testkit.Rows("utf8mb4_bin utf8mb4 46 Yes Yes 1")) -} - func TestShowLimitReturnRow(t *testing.T) { store := testkit.CreateMockStore(t) @@ -1935,47 +1222,3 @@ func TestShowBindingDigestField(t *testing.T) { result = tk.MustQuery("show global bindings;") require.Equal(t, len(result.Rows()), 0) } - -func TestShowPasswordVariable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("SET GLOBAL authentication_ldap_sasl_bind_root_pwd = ''") - rs, err := tk.Exec("show variables like 'authentication_ldap_sasl_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][1], "") - rs, err = tk.Exec("SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][0], "") - - tk.MustExec("SET GLOBAL authentication_ldap_sasl_bind_root_pwd = password") - defer func() { - tk.MustExec("SET GLOBAL authentication_ldap_sasl_bind_root_pwd = ''") - }() - rs, err = tk.Exec("show variables like 'authentication_ldap_sasl_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][1], variable.MaskPwd) - rs, err = tk.Exec("SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][0], variable.MaskPwd) - - tk.MustExec("SET GLOBAL authentication_ldap_simple_bind_root_pwd = ''") - rs, err = tk.Exec("show variables like 'authentication_ldap_simple_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][1], "") - rs, err = tk.Exec("SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_simple_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][0], "") - - tk.MustExec("SET GLOBAL authentication_ldap_simple_bind_root_pwd = password") - defer func() { - tk.MustExec("SET GLOBAL authentication_ldap_simple_bind_root_pwd = ''") - }() - - rs, err = tk.Exec("show variables like 'authentication_ldap_simple_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][1], variable.MaskPwd) - rs, err = tk.Exec("SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 
'authentication_ldap_simple_bind_root_pwd'") - require.NoError(t, err) - require.Equal(t, tk.ResultSetToResult(rs, "").Rows()[0][0], variable.MaskPwd) -} diff --git a/pkg/executor/test/simpletest/BUILD.bazel b/pkg/executor/test/simpletest/BUILD.bazel index a94c47e4958dd..13e5b3f41c37b 100644 --- a/pkg/executor/test/simpletest/BUILD.bazel +++ b/pkg/executor/test/simpletest/BUILD.bazel @@ -4,26 +4,25 @@ go_test( name = "simpletest_test", timeout = "short", srcs = [ - "chunk_reuse_test.go", "main_test.go", "simple_test.go", ], flaky = True, race = "on", - shard_count = 36, + shard_count = 12, deps = [ "//pkg/config", - "//pkg/errno", "//pkg/parser/auth", "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", - "//pkg/planner/core", + "//pkg/server", "//pkg/session", "//pkg/sessionctx", "//pkg/store/mockstore", "//pkg/testkit", "//pkg/util/dbterror/exeerrors", + "//pkg/util/globalconn", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", "@io_opencensus_go//stats/view", diff --git a/pkg/executor/test/simpletest/chunk_reuse_test.go b/pkg/executor/test/simpletest/chunk_reuse_test.go deleted file mode 100644 index 9478ea6ffa302..0000000000000 --- a/pkg/executor/test/simpletest/chunk_reuse_test.go +++ /dev/null @@ -1,138 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
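The chunk_reuse_test.go file deleted below exercised executor chunk reuse through the @@last_sql_use_alloc session status: "1" means the previous statement served its chunks from the reusable allocator, "0" means it fell back to fresh allocations (as the deleted cases show for text, blob, and json columns). A minimal probe in the same testkit style; illustrative, and it only logs the flag because the value depends on the plan and column types:

// probeChunkReuse runs a query, then reads whether it reused allocator-backed
// chunks. Hypothetical helper, assuming the testkit setup used in this file.
func probeChunkReuse(t *testing.T, tk *testkit.TestKit, query string) {
	tk.MustQuery(query)
	rows := tk.MustQuery("select @@last_sql_use_alloc").Rows()
	t.Logf("%s => last_sql_use_alloc=%v", query, rows[0][0])
}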
- -package simpletest - -import ( - "testing" - - "github.com/pingcap/tidb/pkg/testkit" - "github.com/stretchr/testify/require" -) - -func TestLongBlobReuse(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t1 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2))") - tk.MustExec("insert into t1 (id1,id2)values(1,1);") - tk.MustExec("insert into t1 (id1,id2)values(2,2),(3,3);") - tk.MustExec("create table t2 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),PRIMARY KEY (`id1`) clustered,key id2(id2))") - tk.MustExec("insert into t2 (id1,id2)values(1,1);") - tk.MustExec("insert into t2 (id1,id2)values(2,2),(3,3);") - //IndexRangeScan - res := tk.MustQuery("explain select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'") - require.Regexp(t, ".*IndexRangeScan*", res.Rows()[4][0]) - tk.MustQuery("select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'").Check(testkit.Rows("2", "2", "3", "3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id2 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 2", "2 2", "3 3", "3 3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id3 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 ", "2 ", "3 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id4 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 ", "2 ", "3 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id5 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 ", "2 ", "3 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id6 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 ", "2 ", "3 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id7 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ").Check(testkit.Rows("2 ", "2 ", "3 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - - //TableFullScan - res = tk.MustQuery("explain select t1.id1 from t1,t2 where t1.id2 > '1'and t1.id1 = t2.id1") - require.Regexp(t, ".*TableFullScan*", res.Rows()[2][0]) - tk.MustQuery("select t1.id1 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2", "3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1 ,t1.id3 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1 ,t1.id4 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1 ,t1.id5 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1 ,t1.id6 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1 
,t1.id7 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1").Check(testkit.Rows("2 ", "3 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - - //Point_Get - res = tk.MustQuery("explain select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1") - require.Regexp(t, ".*Point_Get*", res.Rows()[1][0]) - tk.MustQuery("select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1").Check(testkit.Rows("1")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id2 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 1")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id3 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id4 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id5 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id6 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id7 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - - //IndexLookUp - res = tk.MustQuery("explain select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2' ;") - require.Regexp(t, ".*IndexLookUp*", res.Rows()[1][0]) - tk.MustQuery("select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select t1.id1,t1.id3 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id4 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id5 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id7 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select t1.id1,t1.id6 ,t2.id3 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'").Check(testkit.Rows("1 ")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - - // IndexMerge - tk.MustExec("create table t3 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2),key id8(id8))") - tk.MustExec("insert into t3 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3);") - res = tk.MustQuery("explain select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7);") - require.Regexp(t, ".*IndexMerge*", res.Rows()[3][0]) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3")) - 
tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id3 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id4 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id5 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id6 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id7 from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - - //IndexReader - tk.MustExec("set tidb_enable_clustered_index = OFF") - tk.MustExec("create table t4 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`),key id2(id2),key id8(id8,id2))") - tk.MustExec("insert into t4 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3);") - res = tk.MustQuery("explain select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4');") - require.Regexp(t, ".*IndexReader*", res.Rows()[2][0]) - tk.MustQuery("select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4');").Sort().Check(testkit.Rows()) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - - //function - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id3) from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3", "")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id2) from t3 where id2 > '4' or id8 < 7);").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("1")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and id3 is null);").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) - tk.MustQuery("select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and char_length(id3) > 0);").Sort().Check(testkit.Rows("1", "2", "3")) - tk.MustQuery("select @@last_sql_use_alloc").Check(testkit.Rows("0")) -} diff --git a/pkg/executor/test/simpletest/simple_test.go b/pkg/executor/test/simpletest/simple_test.go index 698208bc0e5d7..7843bed019ef2 100644 --- a/pkg/executor/test/simpletest/simple_test.go +++ b/pkg/executor/test/simpletest/simple_test.go @@ -16,43 +16,27 @@ package simpletest import ( "context" + "fmt" "strconv" "testing" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/parser/auth" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" 
"github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/planner/core" + "github.com/pingcap/tidb/pkg/server" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/globalconn" "github.com/stretchr/testify/require" "go.opencensus.io/stats/view" ) -func TestFlushTables(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("FLUSH TABLES") - err := tk.ExecToErr("FLUSH TABLES WITH READ LOCK") - require.Error(t, err) -} - -func TestUseDB(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("USE test") - err := tk.ExecToErr("USE ``") - require.Truef(t, terror.ErrorEqual(core.ErrNoDB, err), "err %v", err) -} - func TestStmtAutoNewTxn(t *testing.T) { store := testkit.CreateMockStore(t) // Some statements are like DDL, they commit the previous txn automically. @@ -84,59 +68,6 @@ func TestStmtAutoNewTxn(t *testing.T) { tk.MustQuery("select * from auto_new").Check(testkit.Rows("1", "2")) } -func TestIssue9111(t *testing.T) { - store := testkit.CreateMockStore(t) - // CREATE USER / DROP USER fails if admin doesn't have insert privilege on `mysql.user` table. - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user 'user_admin'@'localhost';") - tk.MustExec("grant create user on *.* to 'user_admin'@'localhost';") - - // Create a new session. - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "user_admin", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - _, err = se.Execute(ctx, `create user test_create_user`) - require.NoError(t, err) - _, err = se.Execute(ctx, `drop user test_create_user`) - require.NoError(t, err) - - tk.MustExec("revoke create user on *.* from 'user_admin'@'localhost';") - tk.MustExec("grant insert, delete on mysql.user to 'user_admin'@'localhost';") - - _, err = se.Execute(ctx, `create user test_create_user`) - require.NoError(t, err) - _, err = se.Execute(ctx, `drop user test_create_user`) - require.NoError(t, err) - - _, err = se.Execute(ctx, `create role test_create_user`) - require.NoError(t, err) - _, err = se.Execute(ctx, `drop role test_create_user`) - require.NoError(t, err) - - tk.MustExec("drop user 'user_admin'@'localhost';") -} - -func TestRoleAtomic(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec("create role r2;") - err := tk.ExecToErr("create role r1, r2, r3") - require.Error(t, err) - // Check atomic create role. - result := tk.MustQuery(`SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3')`) - result.Check(testkit.Rows("r2")) - // Check atomic drop role. 
- err = tk.ExecToErr("drop role r1, r2, r3") - require.Error(t, err) - result = tk.MustQuery(`SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3')`) - result.Check(testkit.Rows("r2")) - tk.MustExec("drop role r2;") -} - func TestExtendedStatsPrivileges(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -177,122 +108,6 @@ func TestExtendedStatsPrivileges(t *testing.T) { tk.MustExec("drop user 'u1'@'%'") } -func TestIssue17247(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user 'issue17247'") - tk.MustExec("grant CREATE USER on *.* to 'issue17247'") - - tk1 := testkit.NewTestKit(t, store) - tk1.MustExec("use test") - require.NoError(t, tk1.Session().Auth(&auth.UserIdentity{Username: "issue17247", Hostname: "%"}, nil, nil, nil)) - tk1.MustExec("ALTER USER USER() IDENTIFIED BY 'xxx'") - tk1.MustExec("ALTER USER CURRENT_USER() IDENTIFIED BY 'yyy'") - tk1.MustExec("ALTER USER CURRENT_USER IDENTIFIED BY 'zzz'") - tk.MustExec("ALTER USER 'issue17247'@'%' IDENTIFIED BY 'kkk'") - tk.MustExec("ALTER USER 'issue17247'@'%' IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'") - // Wrong grammar - _, err := tk1.Exec("ALTER USER USER() IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'") - require.Error(t, err) -} - -// Close issue #23649. -// See https://github.com/pingcap/tidb/issues/23649 -func TestIssue23649(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("DROP USER IF EXISTS issue23649;") - tk.MustExec("CREATE USER issue23649;") - err := tk.ExecToErr("GRANT bogusrole to issue23649;") - require.Equal(t, "[executor:3523]Unknown authorization ID `bogusrole`@`%`", err.Error()) - err = tk.ExecToErr("GRANT bogusrole to nonexisting;") - require.Equal(t, "[executor:3523]Unknown authorization ID `bogusrole`@`%`", err.Error()) -} - -func TestSetCurrentUserPwd(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE USER issue28534;") - defer func() { - tk.MustExec("DROP USER IF EXISTS issue28534;") - }() - - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "issue28534", Hostname: "localhost", CurrentUser: true, AuthUsername: "issue28534", AuthHostname: "%"}, nil, nil, nil)) - tk.MustExec(`SET PASSWORD FOR CURRENT_USER() = "43582eussi"`) - - require.NoError(t, tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil)) - result := tk.MustQuery(`SELECT authentication_string FROM mysql.User WHERE User="issue28534"`) - result.Check(testkit.Rows(auth.EncodePassword("43582eussi"))) -} - -func TestShowGrantsAfterDropRole(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE USER u29473") - defer tk.MustExec("DROP USER IF EXISTS u29473") - - tk.MustExec("CREATE ROLE r29473") - tk.MustExec("GRANT r29473 TO u29473") - tk.MustExec("GRANT CREATE USER ON *.* TO u29473") - - tk.Session().Auth(&auth.UserIdentity{Username: "u29473", Hostname: "%"}, nil, nil, nil) - tk.MustExec("SET ROLE r29473") - tk.MustExec("DROP ROLE r29473") - tk.MustQuery("SHOW GRANTS").Check(testkit.Rows("GRANT CREATE USER ON *.* TO 'u29473'@'%'")) -} - -func TestPrivilegesAfterDropUser(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t1(id int, v int)") - defer tk.MustExec("drop table t1") - - 
tk.MustExec("CREATE USER u1 require ssl") - defer tk.MustExec("DROP USER IF EXISTS u1") - - tk.MustExec("GRANT CREATE ON test.* TO u1") - tk.MustExec("GRANT UPDATE ON test.t1 TO u1") - tk.MustExec("GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO u1") - tk.MustExec("GRANT SELECT(v), UPDATE(v) on test.t1 TO u1") - - tk.MustQuery("SELECT COUNT(1) FROM mysql.global_grants WHERE USER='u1' AND HOST='%'").Check(testkit.Rows("1")) - tk.MustQuery("SELECT COUNT(1) FROM mysql.global_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows("1")) - tk.MustQuery("SELECT COUNT(1) FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows("1")) - tk.MustQuery("SELECT COUNT(1) FROM mysql.columns_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows("1")) - tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil) - tk.MustQuery("SHOW GRANTS FOR u1").Check(testkit.Rows( - "GRANT USAGE ON *.* TO 'u1'@'%'", - "GRANT CREATE ON `test`.* TO 'u1'@'%'", - "GRANT UPDATE ON `test`.`t1` TO 'u1'@'%'", - "GRANT SELECT(v), UPDATE(v) ON `test`.`t1` TO 'u1'@'%'", - "GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO 'u1'@'%'", - )) - - tk.MustExec("DROP USER u1") - err := tk.QueryToErr("SHOW GRANTS FOR u1") - require.Equal(t, "[privilege:1141]There is no such grant defined for user 'u1' on host '%'", err.Error()) - tk.MustQuery("SELECT * FROM mysql.global_grants WHERE USER='u1' AND HOST='%'").Check(testkit.Rows()) - tk.MustQuery("SELECT * FROM mysql.global_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows()) - tk.MustQuery("SELECT * FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows()) - tk.MustQuery("SELECT * FROM mysql.columns_priv WHERE USER='u1' AND HOST='%'").Check(testkit.Rows()) -} - -func TestDropRoleAfterRevoke(t *testing.T) { - store := testkit.CreateMockStore(t) - // issue 29781 - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.Session().Auth(&auth.UserIdentity{Username: "root", Hostname: "%"}, nil, nil, nil) - - tk.MustExec("create role r1, r2, r3;") - defer tk.MustExec("drop role if exists r1, r2, r3;") - tk.MustExec("grant r1,r2,r3 to current_user();") - tk.MustExec("set role all;") - tk.MustExec("revoke r1, r3 from root;") - tk.MustExec("drop role r1;") -} - func TestUserWithSetNames(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -314,125 +129,6 @@ func TestUserWithSetNames(t *testing.T) { tk.MustExec("drop user '\xd2\xbb';") } -func TestStatementsCauseImplicitCommit(t *testing.T) { - // Test some of the implicit commit statements. 
- // See https://dev.mysql.com/doc/refman/5.7/en/implicit-commit.html - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") - tk.MustExec("create table ic (id int primary key)") - - cases := []string{ - "create table xx (id int)", - "create user 'xx'@'127.0.0.1'", - "grant SELECT on test.ic to 'xx'@'127.0.0.1'", - "flush privileges", - "analyze table ic", - } - for i, sql := range cases { - tk.MustExec("begin") - tk.MustExec("insert into ic values (?)", i) - tk.MustExec(sql) - tk.MustQuery("select * from ic where id = ?", i).Check(testkit.Rows(strconv.FormatInt(int64(i), 10))) - // Clean up data - tk.MustExec("delete from ic") - } -} - -func TestDo(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("do 1, @a:=1") - tk.MustQuery("select @a").Check(testkit.Rows("1")) - - tk.MustExec("use test") - tk.MustExec("create table t (i int)") - tk.MustExec("insert into t values (1)") - tk2 := testkit.NewTestKit(t, store) - tk2.MustExec("use test") - tk.MustQuery("select * from t").Check(testkit.Rows("1")) - tk.MustExec("do @a := (select * from t where i = 1)") - tk2.MustExec("insert into t values (2)") - tk.MustQuery("select * from t").Check(testkit.Rows("1", "2")) -} - -func TestDoWithAggFunc(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("DO sum(1)") - tk.MustExec("DO avg(@e+@f)") - tk.MustExec("DO GROUP_CONCAT(NULLIF(ELT(1, @e), 2.0) ORDER BY 1)") -} - -func TestSetRoleAllCorner(t *testing.T) { - store := testkit.CreateMockStore(t) - // For user with no role, `SET ROLE ALL` should active - // a empty slice, rather than nil. - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user set_role_all") - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "set_role_all", Hostname: "localhost"}, nil, nil, nil)) - ctx := context.Background() - _, err = se.Execute(ctx, `set role all`) - require.NoError(t, err) - _, err = se.Execute(ctx, `select current_role`) - require.NoError(t, err) -} - -func TestCreateRole(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user testCreateRole;") - tk.MustExec("grant CREATE USER on *.* to testCreateRole;") - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "testCreateRole", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - _, err = se.Execute(ctx, `create role test_create_role;`) - require.NoError(t, err) - tk.MustExec("revoke CREATE USER on *.* from testCreateRole;") - tk.MustExec("drop role test_create_role;") - tk.MustExec("grant CREATE ROLE on *.* to testCreateRole;") - _, err = se.Execute(ctx, `create role test_create_role;`) - require.NoError(t, err) - tk.MustExec("drop role test_create_role;") - _, err = se.Execute(ctx, `create user test_create_role;`) - require.Error(t, err) - tk.MustExec("drop user testCreateRole;") -} - -func TestDropRole(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user testCreateRole;") - tk.MustExec("create user test_create_role;") - tk.MustExec("grant CREATE USER on *.* to testCreateRole;") - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, 
se.Auth(&auth.UserIdentity{Username: "testCreateRole", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - _, err = se.Execute(ctx, `drop role test_create_role;`) - require.NoError(t, err) - tk.MustExec("revoke CREATE USER on *.* from testCreateRole;") - tk.MustExec("create role test_create_role;") - tk.MustExec("grant DROP ROLE on *.* to testCreateRole;") - _, err = se.Execute(ctx, `drop role test_create_role;`) - require.NoError(t, err) - tk.MustExec("create user test_create_role;") - _, err = se.Execute(ctx, `drop user test_create_role;`) - require.Error(t, err) - tk.MustExec("drop user testCreateRole;") - tk.MustExec("drop user test_create_role;") -} - func TestTransaction(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -467,30 +163,6 @@ func inTxn(ctx sessionctx.Context) bool { return (ctx.GetSessionVars().Status & mysql.ServerStatusInTrans) > 0 } -func TestIssue33144(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - //Create role - tk.MustExec("create role 'r1' ;") - - sessionVars := tk.Session().GetSessionVars() - sessionVars.User = &auth.UserIdentity{Username: "root", Hostname: "localhost", AuthUsername: "root", AuthHostname: "%"} - - //Grant role to current_user() - tk.MustExec("grant 'r1' to current_user();") - //Revoke role from current_user() - tk.MustExec("revoke 'r1' from current_user();") - - //Grant role to current_user(),current_user() - tk.MustExec("grant 'r1' to current_user(),current_user();") - //Revoke role from current_user(),current_user() - tk.MustExec("revoke 'r1' from current_user(),current_user();") - - //Drop role - tk.MustExec("drop role 'r1' ;") -} - func TestRole(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -588,97 +260,6 @@ func TestRole(t *testing.T) { tk.MustExec("SET ROLE NONE") } -func TestRoleAdmin(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("CREATE USER 'testRoleAdmin';") - tk.MustExec("CREATE ROLE 'targetRole';") - - // Create a new session. 
- se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "testRoleAdmin", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - _, err = se.Execute(ctx, "GRANT `targetRole` TO `testRoleAdmin`;") - require.Error(t, err) - - tk.MustExec("GRANT SUPER ON *.* TO `testRoleAdmin`;") - _, err = se.Execute(ctx, "GRANT `targetRole` TO `testRoleAdmin`;") - require.NoError(t, err) - _, err = se.Execute(ctx, "REVOKE `targetRole` FROM `testRoleAdmin`;") - require.NoError(t, err) - - tk.MustExec("DROP USER 'testRoleAdmin';") - tk.MustExec("DROP ROLE 'targetRole';") -} - -func TestDefaultRole(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - createRoleSQL := `CREATE ROLE r_1, r_2, r_3, u_1;` - tk.MustExec(createRoleSQL) - - tk.MustExec("insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_1','%','u_1')") - tk.MustExec("insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_2','%','u_1')") - - tk.MustExec("flush privileges;") - - setRoleSQL := `SET DEFAULT ROLE r_3 TO u_1;` - err := tk.ExecToErr(setRoleSQL) - require.Error(t, err) - - setRoleSQL = `SET DEFAULT ROLE r_1 TO u_1000;` - err = tk.ExecToErr(setRoleSQL) - require.Error(t, err) - - setRoleSQL = `SET DEFAULT ROLE r_1, r_3 TO u_1;` - err = tk.ExecToErr(setRoleSQL) - require.Error(t, err) - - setRoleSQL = `SET DEFAULT ROLE r_1 TO u_1;` - err = tk.ExecToErr(setRoleSQL) - require.NoError(t, err) - result := tk.MustQuery(`SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"`) - result.Check(testkit.Rows("r_1")) - setRoleSQL = `SET DEFAULT ROLE r_2 TO u_1;` - err = tk.ExecToErr(setRoleSQL) - require.NoError(t, err) - result = tk.MustQuery(`SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"`) - result.Check(testkit.Rows("r_2")) - - setRoleSQL = `SET DEFAULT ROLE ALL TO u_1;` - err = tk.ExecToErr(setRoleSQL) - require.NoError(t, err) - result = tk.MustQuery(`SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"`) - result.Check(testkit.Rows("r_1", "r_2")) - - setRoleSQL = `SET DEFAULT ROLE NONE TO u_1;` - err = tk.ExecToErr(setRoleSQL) - require.NoError(t, err) - result = tk.MustQuery(`SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"`) - result.Check(nil) - - dropRoleSQL := `DROP USER r_1, r_2, r_3, u_1;` - tk.MustExec(dropRoleSQL) -} - -func TestSetDefaultRoleAll(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("create user test_all;") - se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "test_all", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - _, err = se.Execute(ctx, "set default role all to test_all;") - require.NoError(t, err) -} - func TestUser(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -919,31 +500,6 @@ func TestSetPwd(t *testing.T) { result.Check(testkit.Rows(auth.EncodePassword("pwd"))) } -func TestFlushPrivileges(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - - tk.MustExec(`CREATE USER 'testflush'@'localhost' IDENTIFIED BY '';`) - tk.MustExec(`UPDATE mysql.User SET Select_priv='Y' WHERE User="testflush" and Host="localhost"`) - - // Create a new session. 
- se, err := session.CreateSession4Test(store) - require.NoError(t, err) - defer se.Close() - require.NoError(t, se.Auth(&auth.UserIdentity{Username: "testflush", Hostname: "localhost"}, nil, nil, nil)) - - ctx := context.Background() - // Before flush. - _, err = se.Execute(ctx, `SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"`) - require.Error(t, err) - - tk.MustExec("FLUSH PRIVILEGES") - - // After flush. - _, err = se.Execute(ctx, `SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"`) - require.NoError(t, err) -} - func TestFlushPrivilegesPanic(t *testing.T) { defer view.Stop() // Run in a separate suite because this test need to set SkipGrantTable config. @@ -1106,47 +662,56 @@ func TestDropStatsForMultipleTable(t *testing.T) { h.SetLease(0) } -func TestCreateUserWithLDAP(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) +func TestKillStmt(t *testing.T) { + store, dom := testkit.CreateMockStoreAndDomain(t) + sv := server.CreateMockServer(t, store) + sv.SetDomain(dom) + defer sv.Close() - tk.MustExec("CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'") - tk.MustQuery("SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'").Check(testkit.Rows("localhost bob uid=bob,ou=People,dc=example,dc=com authentication_ldap_simple")) + conn1 := server.CreateMockConn(t, sv) + tk := testkit.NewTestKitWithSession(t, store, conn1.Context().Session) - tk.MustExec("CREATE USER 'bob2'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob2,ou=People,dc=example,dc=com'") - tk.MustQuery("SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob2'").Check(testkit.Rows("localhost bob2 uid=bob2,ou=People,dc=example,dc=com authentication_ldap_sasl")) -} + originCfg := config.GetGlobalConfig() + newCfg := *originCfg + newCfg.EnableGlobalKill = false + config.StoreGlobalConfig(&newCfg) + defer func() { + config.StoreGlobalConfig(originCfg) + }() -func TestAlterUserWithLDAP(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) + connID := conn1.ID() - // case 1: alter from a LDAP user to LDAP user - tk.MustExec("CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'") - tk.MustQuery("SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'").Check(testkit.Rows("localhost bob uid=bob,ou=People,dc=example,dc=com authentication_ldap_simple")) - tk.MustExec("ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'") - tk.MustQuery("SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'").Check(testkit.Rows("localhost bob uid=bob,ou=Manager,dc=example,dc=com authentication_ldap_sasl")) - - // case 2: should ignore the password history - tk.MustExec("ALTER USER 'bob'@'localhost' PASSWORD HISTORY 5\n") - tk.MustExec("ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'") - tk.MustExec("ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'") - tk.MustExec("ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'") - tk.MustExec("ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 
'uid=bob,ou=Manager,dc=example,dc=com'") -} + tk.MustExec("use test") + tk.MustExec(fmt.Sprintf("kill %d", connID)) + result := tk.MustQuery("show warnings") + result.Check(testkit.Rows("Warning 1105 Invalid operation. Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead")) -func TestIssue44098(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) + newCfg2 := *originCfg + newCfg2.EnableGlobalKill = true + config.StoreGlobalConfig(&newCfg2) + + // ZERO serverID, treated as truncated. + tk.MustExec("kill 1") + result = tk.MustQuery("show warnings") + result.Check(testkit.Rows("Warning 1105 Kill failed: Received a 32bits truncated ConnectionID, expect 64bits. Please execute 'KILL [CONNECTION | QUERY] ConnectionID' to send a Kill without truncating ConnectionID.")) + + // truncated + tk.MustExec("kill 101") + result = tk.MustQuery("show warnings") + result.Check(testkit.Rows("Warning 1105 Kill failed: Received a 32bits truncated ConnectionID, expect 64bits. Please execute 'KILL [CONNECTION | QUERY] ConnectionID' to send a Kill without truncating ConnectionID.")) + + // exceeds int64 + tk.MustExec("kill 9223372036854775808") // 9223372036854775808 == 2^63 + result = tk.MustQuery("show warnings") + result.Check(testkit.Rows("Warning 1105 Parse ConnectionID failed: unexpected connectionID exceeds int64")) + + // local kill + connIDAllocator := globalconn.NewGlobalAllocator(dom.ServerID, false) + killConnID := connIDAllocator.NextID() + tk.MustExec("kill " + strconv.FormatUint(killConnID, 10)) + result = tk.MustQuery("show warnings") + result.Check(testkit.Rows()) - tk.MustExec("set global validate_password.enable = 1") - tk.MustExec("create user u1 identified with 'tidb_auth_token'") - tk.MustExec("create user u2 identified with 'auth_socket'") - tk.MustExec("create user u3 identified with 'authentication_ldap_simple'") - tk.MustExec("create user u4 identified with 'authentication_ldap_sasl'") - tk.MustGetErrCode("create user u5 identified with 'mysql_native_password'", errno.ErrNotValidPassword) - tk.MustGetErrCode("create user u5 identified with 'caching_sha2_password'", errno.ErrNotValidPassword) - tk.MustGetErrCode("create user u5 identified with 'tidb_sm3_password'", errno.ErrNotValidPassword) - tk.MustGetErrCode("create user u5 identified with 'mysql_clear_password'", errno.ErrPluginIsNotLoaded) - tk.MustGetErrCode("create user u5 identified with 'tidb_session_token'", errno.ErrPluginIsNotLoaded) + tk.MustExecToErr("kill rand()", "Invalid operation.
Please use 'KILL TIDB [CONNECTION | QUERY] [connectionID | CONNECTION_ID()]' instead") + // remote kill is tested in `tests/globalkilltest` } diff --git a/pkg/executor/test/splittest/BUILD.bazel b/pkg/executor/test/splittest/BUILD.bazel index 7240c6591cc24..1d0dde98ec3c7 100644 --- a/pkg/executor/test/splittest/BUILD.bazel +++ b/pkg/executor/test/splittest/BUILD.bazel @@ -9,13 +9,11 @@ go_test( ], flaky = True, race = "on", - shard_count = 5, + shard_count = 3, deps = [ "//pkg/ddl", "//pkg/domain/infosync", - "//pkg/errno", "//pkg/kv", - "//pkg/parser/mysql", "//pkg/parser/terror", "//pkg/planner/core", "//pkg/sessionctx/variable", diff --git a/pkg/executor/test/splittest/split_table_test.go b/pkg/executor/test/splittest/split_table_test.go index 76522daf0026b..4a81c5a93553e 100644 --- a/pkg/executor/test/splittest/split_table_test.go +++ b/pkg/executor/test/splittest/split_table_test.go @@ -22,9 +22,7 @@ import ( "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/domain/infosync" - "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" plannercore "github.com/pingcap/tidb/pkg/planner/core" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -39,136 +37,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestSplitTableRegion(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(a varchar(100),b int, index idx1(b,a))") - tk.MustExec(`split table t index idx1 by (10000,"abcd"),(10000000);`) - tk.MustGetErrCode(`split table t index idx1 by ("abcd");`, mysql.WarnDataTruncated) - - // Test for split index region. - // Check min value is more than max value. - tk.MustExec(`split table t index idx1 between (0) and (1000000000) regions 10`) - tk.MustGetErrCode(`split table t index idx1 between (2,'a') and (1,'c') regions 10`, errno.ErrInvalidSplitRegionRanges) - - // Check min value is invalid. - tk.MustGetErrMsg(`split table t index idx1 between () and (1) regions 10`, "Split index `idx1` region lower value count should more than 0") - - // Check max value is invalid. - tk.MustGetErrMsg(`split table t index idx1 between (1) and () regions 10`, "Split index `idx1` region upper value count should more than 0") - - // Check pre-split region num is too large. - tk.MustGetErrMsg(`split table t index idx1 between (0) and (1000000000) regions 10000`, "Split index region num exceeded the limit 1000") - - // Check pre-split region num 0 is invalid. - tk.MustGetErrMsg(`split table t index idx1 between (0) and (1000000000) regions 0`, "Split index region num should more than 0") - - // Test truncate error msg. - tk.MustGetErrMsg(`split table t index idx1 between ("aa") and (1000000000) regions 0`, "[types:1265]Incorrect value: 'aa' for column 'b'") - - // Test for split table region. - tk.MustExec(`split table t between (0) and (1000000000) regions 10`) - // Check the lower value is more than the upper value. - tk.MustGetErrCode(`split table t between (2) and (1) regions 10`, errno.ErrInvalidSplitRegionRanges) - - // Check the lower value is invalid. - tk.MustGetErrMsg(`split table t between () and (1) regions 10`, "Split table region lower value count should be 1") - - // Check upper value is invalid. - tk.MustGetErrMsg(`split table t between (1) and () regions 10`, "Split table region upper value count should be 1") - - // Check pre-split region num is too large. 
- tk.MustGetErrMsg(`split table t between (0) and (1000000000) regions 10000`, "Split table region num exceeded the limit 1000") - - // Check pre-split region num 0 is invalid. - tk.MustGetErrMsg(`split table t between (0) and (1000000000) regions 0`, "Split table region num should more than 0") - - // Test truncate error msg. - tk.MustGetErrMsg(`split table t between ("aa") and (1000000000) regions 10`, "[types:1265]Incorrect value: 'aa' for column '_tidb_rowid'") - - // Test split table region step is too small. - tk.MustGetErrCode(`split table t between (0) and (100) regions 10`, errno.ErrInvalidSplitRegionRanges) - - // Test split region by syntax. - tk.MustExec(`split table t by (0),(1000),(1000000)`) - - // Test split region twice to test for multiple batch split region requests. - tk.MustExec("create table t1(a int, b int)") - tk.MustQuery("split table t1 between(0) and (10000) regions 10;").Check(testkit.Rows("9 1")) - tk.MustQuery("split table t1 between(10) and (10010) regions 5;").Check(testkit.Rows("4 1")) - - // Test split region for partition table. - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int,b int) partition by hash(a) partitions 5;") - tk.MustQuery("split table t between (0) and (1000000) regions 5;").Check(testkit.Rows("20 1")) - // Test for `split for region` syntax. - tk.MustQuery("split region for partition table t between (1000000) and (100000000) regions 10;").Check(testkit.Rows("45 1")) - - // Test split region for partition table with specified partition. - tk.MustQuery("split table t partition (p1,p2) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1")) - // Test for `split for region` syntax. - tk.MustQuery("split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5;").Check(testkit.Rows("8 1")) -} - -func TestClusterIndexSplitTableIntegration(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test_cluster_index_index_split_table_integration;") - tk.MustExec("create database test_cluster_index_index_split_table_integration;") - tk.MustExec("use test_cluster_index_index_split_table_integration;") - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec("create table t (a varchar(255), b double, c int, primary key (a, b));") - - // Value list length not match. - lowerMsg := "Split table region lower value count should be 2" - upperMsg := "Split table region upper value count should be 2" - tk.MustGetErrMsg("split table t between ('aaa') and ('aaa', 100.0) regions 10;", lowerMsg) - tk.MustGetErrMsg("split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10;", upperMsg) - - // Value type not match. - errMsg := "[types:1265]Incorrect value: 'aaa' for column 'b'" - tk.MustGetErrMsg("split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10;", errMsg) - - // lower bound >= upper bound. - errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (aaa,0) should less than the upper value (aaa,0)" - tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10;", errMsg) - errMsg = "[executor:8212]Failed to split region ranges: Split table `t` region lower value (bbb,0) should less than the upper value (aaa,0)" - tk.MustGetErrMsg("split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10;", errMsg) - - // Exceed limit 1000. 
- errMsg = "Split table region num exceeded the limit 1000" - tk.MustGetErrMsg("split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000;", errMsg) - - // Split on null values. - errMsg = "[planner:1048]Column 'a' cannot be null" - tk.MustGetErrMsg("split table t between (null, null) and (null, null) regions 1000;", errMsg) - tk.MustGetErrMsg("split table t by (null, null);", errMsg) - - // Success. - tk.MustExec("split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10;") - tk.MustExec("split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0);") - tk.MustExec("split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0);") - - tk.MustExec("drop table t;") - tk.MustExec("create table t (a int, b int, c int, d int, primary key(a, c, d));") - tk.MustQuery("split table t between (0, 0, 0) and (0, 0, 1) regions 1000;").Check(testkit.Rows("999 1")) - - tk.MustExec("drop table t;") - tk.MustExec("create table t (a int, b int, c int, d int, primary key(d, a, c));") - tk.MustQuery("split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535);").Check(testkit.Rows("3 1")) - - tk.MustExec("drop table if exists t;") - tk.MustExec("create table t (a varchar(255), b decimal, c int, primary key (a, b));") - errMsg = "[types:1265]Incorrect value: '' for column 'b'" - tk.MustGetErrMsg("split table t by ('aaa', '')", errMsg) - - tk.MustExec("drop table t;") - tk.MustExec("CREATE TABLE t (`id` varchar(10) NOT NULL, primary key (`id`) CLUSTERED);") - tk.MustGetErrCode("split table t index `primary` between (0) and (1000) regions 2;", errno.ErrKeyDoesNotExist) -} - func TestClusterIndexShowTableRegion(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) @@ -630,7 +498,6 @@ func TestShowTableRegion(t *testing.T) { rows = re.Rows() require.Len(t, rows, 3) require.Len(t, rows[0], 13) - tbl = external.GetTableByName(t, tk, "test", "t2_scheduling") require.Equal(t, "LEADER_CONSTRAINTS=\"[+region=us-east-1]\" FOLLOWERS=3 FOLLOWER_CONSTRAINTS=\"[+region=us-east-2]\"", rows[0][11]) require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[1][11]) require.Equal(t, "PRIMARY_REGION=\"cn-east-1\" REGIONS=\"cn-east-1,cn-east-2\" SCHEDULE=\"EVEN\"", rows[2][11]) diff --git a/pkg/executor/test/tiflashtest/BUILD.bazel b/pkg/executor/test/tiflashtest/BUILD.bazel index 1b9a556d06196..4afb61918b486 100644 --- a/pkg/executor/test/tiflashtest/BUILD.bazel +++ b/pkg/executor/test/tiflashtest/BUILD.bazel @@ -9,7 +9,7 @@ go_test( ], flaky = True, race = "on", - shard_count = 39, + shard_count = 40, deps = [ "//pkg/config", "//pkg/domain", @@ -22,7 +22,7 @@ go_test( "//pkg/testkit", "//pkg/testkit/external", "//pkg/util/dbterror/exeerrors", - "//pkg/util/memory", + "//pkg/util/sqlkiller", "//pkg/util/tiflashcompute", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/executor/test/tiflashtest/tiflash_test.go b/pkg/executor/test/tiflashtest/tiflash_test.go index 2b0796de8f6d5..1502e54baf76b 100644 --- a/pkg/executor/test/tiflashtest/tiflash_test.go +++ b/pkg/executor/test/tiflashtest/tiflash_test.go @@ -20,7 +20,6 @@ import ( "math/rand" "strings" "sync" - "sync/atomic" "testing" "time" @@ -37,7 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" - "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/sqlkiller" 
"github.com/pingcap/tidb/pkg/util/tiflashcompute" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/testutils" @@ -682,7 +681,7 @@ func TestCancelMppTasks(t *testing.T) { // mock executor does not support use outer table as build side for outer join, so need to // force the inner table as build side tk.MustExec("set tidb_opt_mpp_outer_join_fixed_build_side=1") - atomic.StoreUint32(&tk.Session().GetSessionVars().Killed, 0) + tk.Session().GetSessionVars().SQLKiller.Reset() require.Nil(t, failpoint.Enable(hang, `return(true)`)) wg := &sync.WaitGroup{} wg.Add(1) @@ -693,7 +692,7 @@ func TestCancelMppTasks(t *testing.T) { require.Equal(t, int(exeerrors.ErrQueryInterrupted.Code()), int(terror.ToSQLError(errors.Cause(err).(*terror.Error)).Code)) }() time.Sleep(1 * time.Second) - atomic.StoreUint32(&tk.Session().GetSessionVars().Killed, 1) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) wg.Wait() require.Nil(t, failpoint.Disable(hang)) } @@ -1481,7 +1480,7 @@ func TestMPPMemoryTracker(t *testing.T) { }() err = tk.QueryToErr("select * from t") require.NotNil(t, err) - require.True(t, strings.Contains(err.Error(), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery)) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } func TestTiFlashComputeDispatchPolicy(t *testing.T) { @@ -1736,6 +1735,42 @@ func TestMppStoreCntWithErrors(t *testing.T) { require.Nil(t, failpoint.Disable(mppStoreCountPDError)) } +func TestMPP47766(t *testing.T) { + store := testkit.CreateMockStore(t, withMockTiFlash(1)) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("set @@session.tidb_allow_mpp=1") + tk.MustExec("set @@session.tidb_enforce_mpp=1") + tk.MustExec("set @@session.tidb_allow_tiflash_cop=off") + + tk.MustExec("CREATE TABLE `traces` (" + + " `test_time` timestamp NOT NULL," + + " `test_time_gen` date GENERATED ALWAYS AS (date(`test_time`)) VIRTUAL," + + " KEY `traces_date_idx` (`test_time_gen`)" + + ")") + tk.MustExec("alter table `traces` set tiflash replica 1") + tb := external.GetTableByName(t, tk, "test", "traces") + err := domain.GetDomain(tk.Session()).DDL().UpdateTableReplicaInfo(tk.Session(), tb.Meta().ID, true) + require.NoError(t, err) + tk.MustQuery("explain select date(test_time), count(1) as test_date from `traces` group by 1").Check(testkit.Rows( + "Projection_4 8000.00 root test.traces.test_time_gen->Column#5, Column#4", + "└─HashAgg_8 8000.00 root group by:test.traces.test_time_gen, funcs:count(1)->Column#4, funcs:firstrow(test.traces.test_time_gen)->test.traces.test_time_gen", + " └─TableReader_20 10000.00 root MppVersion: 2, data:ExchangeSender_19", + " └─ExchangeSender_19 10000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─TableFullScan_18 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) + tk.MustQuery("explain select /*+ read_from_storage(tiflash[traces]) */ date(test_time) as test_date, count(1) from `traces` group by 1").Check(testkit.Rows( + "TableReader_31 8000.00 root MppVersion: 2, data:ExchangeSender_30", + "└─ExchangeSender_30 8000.00 mpp[tiflash] ExchangeType: PassThrough", + " └─Projection_5 8000.00 mpp[tiflash] date(test.traces.test_time)->Column#5, Column#4", + " └─Projection_26 8000.00 mpp[tiflash] Column#4, test.traces.test_time", + " └─HashAgg_27 8000.00 mpp[tiflash] group by:Column#13, funcs:sum(Column#14)->Column#4, funcs:firstrow(Column#15)->test.traces.test_time", + " └─ExchangeReceiver_29 8000.00 mpp[tiflash] ", + " └─ExchangeSender_28 8000.00 
mpp[tiflash] ExchangeType: HashPartition, Compression: FAST, Hash Cols: [name: Column#13, collate: binary]", + " └─HashAgg_25 8000.00 mpp[tiflash] group by:Column#17, funcs:count(1)->Column#14, funcs:firstrow(Column#16)->Column#15", + " └─Projection_32 10000.00 mpp[tiflash] test.traces.test_time->Column#16, date(test.traces.test_time)->Column#17", + " └─TableFullScan_15 10000.00 mpp[tiflash] table:traces keep order:false, stats:pseudo")) +} + func TestUnionScan(t *testing.T) { store := testkit.CreateMockStore(t, withMockTiFlash(2)) tk := testkit.NewTestKit(t, store) diff --git a/pkg/executor/test/unstabletest/BUILD.bazel b/pkg/executor/test/unstabletest/BUILD.bazel index 06b004f5391a7..6e5dbfb0ff86a 100644 --- a/pkg/executor/test/unstabletest/BUILD.bazel +++ b/pkg/executor/test/unstabletest/BUILD.bazel @@ -6,15 +6,14 @@ go_test( srcs = [ "main_test.go", "memory_test.go", - "unstable_test.go", ], flaky = True, - shard_count = 4, deps = [ "//pkg/config", "//pkg/meta/autoid", "//pkg/testkit", "//pkg/util", + "//pkg/util/dbterror/exeerrors", "//pkg/util/memory", "//pkg/util/skip", "@com_github_stretchr_testify//require", diff --git a/pkg/executor/test/unstabletest/memory_test.go b/pkg/executor/test/unstabletest/memory_test.go index 068c905018255..2263308d5af58 100644 --- a/pkg/executor/test/unstabletest/memory_test.go +++ b/pkg/executor/test/unstabletest/memory_test.go @@ -20,13 +20,87 @@ import ( "runtime" "runtime/debug" "testing" + "time" "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/skip" "github.com/stretchr/testify/require" ) +func TestGlobalMemoryControl(t *testing.T) { + // will time out when data race is enabled + skip.UnderShort(t) + // original position at executor_test.go + store, dom := testkit.CreateMockStoreAndDomain(t) + + tk0 := testkit.NewTestKit(t, store) + tk0.MustExec("set global tidb_mem_oom_action = 'cancel'") + tk0.MustExec("set global tidb_server_memory_limit = 512 << 20") + tk0.MustExec("set global tidb_server_memory_limit_sess_min_size = 128") + + tk1 := testkit.NewTestKit(t, store) + tracker1 := tk1.Session().GetSessionVars().MemTracker + tracker1.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) + + tk2 := testkit.NewTestKit(t, store) + tracker2 := tk2.Session().GetSessionVars().MemTracker + tracker2.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) + + tk3 := testkit.NewTestKit(t, store) + tracker3 := tk3.Session().GetSessionVars().MemTracker + tracker3.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) + + sm := &testkit.MockSessionManager{ + PS: []*util.ProcessInfo{tk1.Session().ShowProcess(), tk2.Session().ShowProcess(), tk3.Session().ShowProcess()}, + } + dom.ServerMemoryLimitHandle().SetSessionManager(sm) + go dom.ServerMemoryLimitHandle().Run() + + tracker1.Consume(100 << 20) // 100 MB + tracker2.Consume(200 << 20) // 200 MB + tracker3.Consume(300 << 20) // 300 MB + + test := make([]int, 128<<20) // Keep 1GB HeapInUse + time.Sleep(500 * time.Millisecond) // The check goroutine checks the memory usage every 100ms. The Sleep() makes sure that the Top1Tracker can be canceled.
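+ // Sketch of the flow this test relies on (inferred from the assertions below, not a spec): + // the goroutine started by dom.ServerMemoryLimitHandle().Run() samples memory usage about + // every 100ms, records the largest consumer in memory.MemUsageTop1Tracker, and arms that + // session's Killer; Killer.HandleSignal() then returns exeerrors.ErrMemoryExceedForInstance + // for that session only, and its next Consume() panics with the same error, cancelling the SQL.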
+ + // Kill Top1 + require.NoError(t, tracker1.Killer.HandleSignal()) + require.NoError(t, tracker2.Killer.HandleSignal()) + require.True(t, exeerrors.ErrMemoryExceedForInstance.Equal(tracker3.Killer.HandleSignal())) + require.Equal(t, memory.MemUsageTop1Tracker.Load(), tracker3) + util.WithRecovery( // Next Consume() will panic and cancel the SQL + func() { + tracker3.Consume(1) + }, func(r interface{}) { + require.True(t, exeerrors.ErrMemoryExceedForInstance.Equal(r.(error))) + }) + tracker2.Consume(300 << 20) // Sum 500MB, no panic; waiting for t3's cancellation to finish. + time.Sleep(500 * time.Millisecond) + require.NoError(t, tracker2.Killer.HandleSignal()) + // Kill Finished + tracker3.Consume(-(300 << 20)) + // Simulate that the SQL is canceled and the time is updated + sm.PSMu.Lock() + ps := *sm.PS[2] + ps.Time = time.Now() + sm.PS[2] = &ps + sm.PSMu.Unlock() + time.Sleep(500 * time.Millisecond) + // Kill the Next SQL + util.WithRecovery( // Next Consume() will panic and cancel the SQL + func() { + tracker2.Consume(1) + }, func(r interface{}) { + require.True(t, exeerrors.ErrMemoryExceedForInstance.Equal(r.(error))) + }) + require.Equal(t, test[0], 0) // Keep 1GB HeapInUse +} + func TestPBMemoryLeak(t *testing.T) { + // will time out when data race is enabled skip.UnderShort(t) debug.SetGCPercent(1000) defer debug.SetGCPercent(100) diff --git a/pkg/executor/test/unstabletest/unstable_test.go b/pkg/executor/test/unstabletest/unstable_test.go deleted file mode 100644 index 7c615605e6fd3..0000000000000 --- a/pkg/executor/test/unstabletest/unstable_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License.
- -package unstabletest - -import ( - "fmt" - "strings" - "testing" - "time" - - "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/memory" - "github.com/pingcap/tidb/pkg/util/skip" - "github.com/stretchr/testify/require" -) - -func TestCartesianJoinPanic(t *testing.T) { - skip.UnderShort(t) - // original position at join_test.go - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("create table t(a int)") - tk.MustExec("insert into t values(1)") - tk.MustExec("set tidb_mem_quota_query = 1 << 28") - tk.MustExec("set global tidb_mem_oom_action = 'CANCEL'") - tk.MustExec("set global tidb_enable_tmp_storage_on_oom = off;") - for i := 0; i < 14; i++ { - tk.MustExec("insert into t select * from t") - } - err := tk.QueryToErr("desc analyze select * from t t1, t t2, t t3, t t4, t t5, t t6;") - require.ErrorContains(t, err, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery) -} - -func TestGlobalMemoryControl(t *testing.T) { - skip.UnderShort(t) - // original position at executor_test.go - store, dom := testkit.CreateMockStoreAndDomain(t) - - tk0 := testkit.NewTestKit(t, store) - tk0.MustExec("set global tidb_mem_oom_action = 'cancel'") - tk0.MustExec("set global tidb_server_memory_limit = 512 << 20") - tk0.MustExec("set global tidb_server_memory_limit_sess_min_size = 128") - - tk1 := testkit.NewTestKit(t, store) - tracker1 := tk1.Session().GetSessionVars().MemTracker - tracker1.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) - - tk2 := testkit.NewTestKit(t, store) - tracker2 := tk2.Session().GetSessionVars().MemTracker - tracker2.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) - - tk3 := testkit.NewTestKit(t, store) - tracker3 := tk3.Session().GetSessionVars().MemTracker - tracker3.FallbackOldAndSetNewAction(&memory.PanicOnExceed{}) - - sm := &testkit.MockSessionManager{ - PS: []*util.ProcessInfo{tk1.Session().ShowProcess(), tk2.Session().ShowProcess(), tk3.Session().ShowProcess()}, - } - dom.ServerMemoryLimitHandle().SetSessionManager(sm) - go dom.ServerMemoryLimitHandle().Run() - - tracker1.Consume(100 << 20) // 100 MB - tracker2.Consume(200 << 20) // 200 MB - tracker3.Consume(300 << 20) // 300 MB - - test := make([]int, 128<<20) // Keep 1GB HeapInUse - time.Sleep(500 * time.Millisecond) // The check goroutine checks the memory usage every 100ms. The Sleep() make sure that Top1Tracker can be Canceled. - - // Kill Top1 - require.False(t, tracker1.NeedKill.Load()) - require.False(t, tracker2.NeedKill.Load()) - require.True(t, tracker3.NeedKill.Load()) - require.Equal(t, memory.MemUsageTop1Tracker.Load(), tracker3) - util.WithRecovery( // Next Consume() will panic and cancel the SQL - func() { - tracker3.Consume(1) - }, func(r interface{}) { - require.True(t, strings.Contains(r.(string), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForInstance)) - }) - tracker2.Consume(300 << 20) // Sum 500MB, Not Panic, Waiting t3 cancel finish. 
- time.Sleep(500 * time.Millisecond) - require.False(t, tracker2.NeedKill.Load()) - // Kill Finished - tracker3.Consume(-(300 << 20)) - // Simulated SQL is Canceled and the time is updated - sm.PSMu.Lock() - ps := *sm.PS[2] - ps.Time = time.Now() - sm.PS[2] = &ps - sm.PSMu.Unlock() - time.Sleep(500 * time.Millisecond) - // Kill the Next SQL - util.WithRecovery( // Next Consume() will panic and cancel the SQL - func() { - tracker2.Consume(1) - }, func(r interface{}) { - require.True(t, strings.Contains(r.(string), memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForInstance)) - }) - require.Equal(t, test[0], 0) // Keep 1GB HeapInUse -} - -func TestAdminCheckTable(t *testing.T) { - skip.UnderShort(t) - // test NULL value. - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`CREATE TABLE test_null ( - a int(11) NOT NULL, - c int(11) NOT NULL, - PRIMARY KEY (a, c), - KEY idx_a (a) - ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin`) - - tk.MustExec(`insert into test_null(a, c) values(2, 2);`) - tk.MustExec(`ALTER TABLE test_null ADD COLUMN b int NULL DEFAULT '1795454803' AFTER a;`) - tk.MustExec(`ALTER TABLE test_null add index b(b);`) - tk.MustExec("ADMIN CHECK TABLE test_null") - - // Fix unflatten issue in CheckExec. - tk.MustExec(`drop table if exists test`) - tk.MustExec(`create table test ( - a time, - PRIMARY KEY (a) - );`) - - tk.MustExec(`insert into test set a='12:10:36';`) - tk.MustExec(`admin check table test`) - - // Test decimal - tk.MustExec(`drop table if exists test`) - tk.MustExec("CREATE TABLE test ( a decimal, PRIMARY KEY (a));") - tk.MustExec("insert into test set a=10;") - tk.MustExec("admin check table test;") - - // Test timestamp type check table. - tk.MustExec(`drop table if exists test`) - tk.MustExec(`create table test ( a TIMESTAMP, primary key(a) );`) - tk.MustExec(`insert into test set a='2015-08-10 04:18:49';`) - tk.MustExec(`admin check table test;`) - - // Test partitioned table. - tk.MustExec(`drop table if exists test`) - tk.MustExec(`create table test ( - a int not null, - c int not null, - primary key (a, c), - key idx_a (a)) partition by range (c) ( - partition p1 values less than (1), - partition p2 values less than (4), - partition p3 values less than (7), - partition p4 values less than (11))`) - for i := 1; i <= 10; i++ { - tk.MustExec(fmt.Sprintf("insert into test values (%d, %d);", i, i)) - } - tk.MustExec(`admin check table test;`) - - // Test index in virtual generated column. - tk.MustExec(`drop table if exists test`) - tk.MustExec(`create table test ( b json , c int as (JSON_EXTRACT(b,'$.d')), index idxc(c));`) - tk.MustExec(`INSERT INTO test set b='{"d": 100}';`) - tk.MustExec(`admin check table test;`) - // Test prefix index. 
- tk.MustExec(`drop table if exists t`) - tk.MustExec(`CREATE TABLE t ( - ID CHAR(32) NOT NULL, - name CHAR(32) NOT NULL, - value CHAR(255), - INDEX indexIDname (ID(8),name(8)));`) - tk.MustExec(`INSERT INTO t VALUES ('keyword','urlprefix','text/ /text');`) - tk.MustExec(`admin check table t;`) - - tk.MustExec("use mysql") - tk.MustExec(`admin check table test.t;`) - err := tk.ExecToErr("admin check table t") - require.Error(t, err) - - // test add index on time type column which have default value - tk.MustExec("use test") - tk.MustExec(`drop table if exists t1`) - tk.MustExec(`CREATE TABLE t1 (c2 YEAR, PRIMARY KEY (c2))`) - tk.MustExec(`INSERT INTO t1 SET c2 = '1912'`) - tk.MustExec(`ALTER TABLE t1 ADD COLUMN c3 TIMESTAMP NULL DEFAULT '1976-08-29 16:28:11'`) - tk.MustExec(`ALTER TABLE t1 ADD COLUMN c4 DATE NULL DEFAULT '1976-08-29'`) - tk.MustExec(`ALTER TABLE t1 ADD COLUMN c5 TIME NULL DEFAULT '16:28:11'`) - tk.MustExec(`ALTER TABLE t1 ADD COLUMN c6 YEAR NULL DEFAULT '1976'`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx1 (c2, c3,c4,c5,c6)`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx2 (c2)`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx3 (c3)`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx4 (c4)`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx5 (c5)`) - tk.MustExec(`ALTER TABLE t1 ADD INDEX idx6 (c6)`) - tk.MustExec(`admin check table t1`) - - // Test add index on decimal column. - tk.MustExec(`drop table if exists td1;`) - tk.MustExec(`CREATE TABLE td1 (c2 INT NULL DEFAULT '70');`) - tk.MustExec(`INSERT INTO td1 SET c2 = '5';`) - tk.MustExec(`ALTER TABLE td1 ADD COLUMN c4 DECIMAL(12,8) NULL DEFAULT '213.41598062';`) - tk.MustExec(`ALTER TABLE td1 ADD INDEX id2 (c4) ;`) - tk.MustExec(`ADMIN CHECK TABLE td1;`) - - // Test add not null column, then add index. - tk.MustExec(`drop table if exists t1`) - tk.MustExec(`create table t1 (a int);`) - tk.MustExec(`insert into t1 set a=2;`) - tk.MustExec(`alter table t1 add column b timestamp not null;`) - tk.MustExec(`alter table t1 add index(b);`) - tk.MustExec(`admin check table t1;`) - - // Test for index with change decimal precision. 
-	tk.MustExec(`drop table if exists t1`)
-	tk.MustExec(`create table t1 (a decimal(2,1), index(a))`)
-	tk.MustExec(`insert into t1 set a='1.9'`)
-	err = tk.ExecToErr(`alter table t1 modify column a decimal(3,2);`)
-	require.NoError(t, err)
-	tk.MustExec(`delete from t1;`)
-	tk.MustExec(`admin check table t1;`)
-}
diff --git a/pkg/executor/testdata/executor_suite_in.json b/pkg/executor/testdata/executor_suite_in.json
deleted file mode 100644
index 484014cd62126..0000000000000
--- a/pkg/executor/testdata/executor_suite_in.json
+++ /dev/null
@@ -1,559 +0,0 @@
-[
-  {
-    "name": "TestRangePartitionBoundariesEq",
-    "cases": [
-      "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')",
-      "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')",
-      "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')",
-      "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')",
-      "ANALYZE TABLE t",
-      "SELECT * FROM t WHERE a = -2147483648",
-      "SELECT * FROM t WHERE a IN (-2147483648)",
-      "SELECT * FROM t WHERE a = 0",
-      "SELECT * FROM t WHERE a IN (0)",
-      "SELECT * FROM t WHERE a = 999998",
-      "SELECT * FROM t WHERE a IN (999998)",
-      "SELECT * FROM t WHERE a = 999999",
-      "SELECT * FROM t WHERE a IN (999999)",
-      "SELECT * FROM t WHERE a = 1000000",
-      "SELECT * FROM t WHERE a IN (1000000)",
-      "SELECT * FROM t WHERE a = 1000001",
-      "SELECT * FROM t WHERE a IN (1000001)",
-      "SELECT * FROM t WHERE a = 1000002",
-      "SELECT * FROM t WHERE a IN (1000002)",
-      "SELECT * FROM t WHERE a = 3000000",
-      "SELECT * FROM t WHERE a IN (3000000)",
-      "SELECT * FROM t WHERE a = 3000001",
-      "SELECT * FROM t WHERE a IN (3000001)",
-      "SELECT * FROM t WHERE a IN (-2147483648, -2147483647)",
-      "SELECT * FROM t WHERE a IN (-2147483647, -2147483646)",
-      "SELECT * FROM t WHERE a IN (999997, 999998, 999999)",
-      "SELECT * FROM t WHERE a IN (999998, 999999, 1000000)",
-      "SELECT * FROM t WHERE a IN (999999, 1000000, 1000001)",
-      "SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002)",
-      "SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999)",
-      "SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000)",
-      "SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001)",
-      "SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002)",
-      "SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999)",
-      "SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000)",
-      "SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001)",
-      "SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002)"
-    ]
-  },
-  {
-    "name": "TestRangePartitionBoundariesNe",
-    "cases": [
-      "INSERT INTO t VALUES (0, '0 Filler...')",
-      "INSERT INTO t VALUES (1, '1 Filler...')",
-      "INSERT INTO t VALUES (2, '2 Filler...')",
-      "INSERT INTO t VALUES (3, '3 Filler...')",
-      "INSERT INTO t VALUES (4, '4 Filler...')",
-      "INSERT INTO t VALUES (5, '5 Filler...')",
-      "INSERT INTO t VALUES (6, '6 Filler...')",
-      "ANALYZE TABLE t",
-      "SELECT * FROM t WHERE a != -1",
-      "SELECT * FROM t WHERE 1 = 1 AND a != -1",
-      "SELECT * FROM t WHERE a NOT IN (-2, -1)",
-      "SELECT * FROM t WHERE 1 = 0 OR a = -1",
-      "SELECT * FROM t WHERE a != 0",
-      "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0",
-      "SELECT * FROM t WHERE a NOT IN (-2, -1, 0)",
-      "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0",
-      "SELECT * FROM t WHERE a != 1",
-
"SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1", - "SELECT * FROM t WHERE a != 2", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2", - "SELECT * FROM t WHERE a != 3", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3", - "SELECT * FROM t WHERE a != 4", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4", - "SELECT * FROM t WHERE a != 5", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5", - "SELECT * FROM t WHERE a != 6", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6", - "SELECT * FROM t WHERE a != 7", - "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7", - "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7)", - "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7" - ] - }, - { - "name": "TestRangePartitionBoundariesBetweenM", - "cases": [ - "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", - "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", - "SELECT * FROM t WHERE a BETWEEN 0 AND -1", - "SELECT * FROM t WHERE a BETWEEN 0 AND 0", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1", - "SELECT * FROM t WHERE a BETWEEN 0 AND 2", - "SELECT * FROM t WHERE a BETWEEN 0 AND 10", - "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 0 AND 
999999", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", - "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", - "SELECT * FROM t WHERE a BETWEEN 3000000 AND 
4000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", - "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003" - ] - }, - { - "name": "TestRangePartitionBoundariesBetweenS", - "cases": [ - "INSERT INTO t VALUES (0, '0 Filler...')", - "INSERT INTO t VALUES (1, '1 Filler...')", - "INSERT INTO t VALUES (2, '2 Filler...')", - "INSERT INTO t VALUES (3, '3 Filler...')", - "INSERT INTO t VALUES (4, '4 Filler...')", - "INSERT INTO t VALUES (5, '5 Filler...')", - "INSERT INTO t VALUES (6, '6 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a BETWEEN 2 AND -1", - "SELECT * FROM t WHERE a BETWEEN -1 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 0", - "SELECT * FROM t WHERE a BETWEEN 0 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 1", - "SELECT * FROM t WHERE a BETWEEN 1 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 2", - "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 3", - "SELECT * FROM t WHERE a BETWEEN 3 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "SELECT * FROM t WHERE a BETWEEN 4 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 5", - "SELECT * FROM t WHERE a BETWEEN 5 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 6", - "SELECT * FROM t WHERE a BETWEEN 6 AND 4", - "SELECT * FROM t WHERE a BETWEEN 2 AND 7", - "SELECT * FROM t WHERE a BETWEEN 7 AND 4" - ] - }, - { - "name": "TestRangePartitionBoundariesLtM", - "cases": [ - "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a < -2147483648", - "SELECT * FROM t WHERE a > -2147483648", - "SELECT * FROM t WHERE a <= -2147483648", - "SELECT * FROM t WHERE a >= -2147483648", - "SELECT * FROM t WHERE a < 0", - "SELECT * FROM t WHERE a > 0", - "SELECT * FROM t WHERE a <= 0", - "SELECT * FROM t WHERE a >= 0", - "SELECT * FROM t WHERE a < 999998", - "SELECT * FROM t WHERE a > 999998", - "SELECT * FROM t WHERE a <= 999998", - "SELECT * FROM t WHERE a >= 999998", - "SELECT * FROM t WHERE a < 999999", - "SELECT * FROM t WHERE a > 999999", - "SELECT * FROM t WHERE a <= 999999", - "SELECT * FROM t WHERE a >= 999999", - "SELECT * FROM t WHERE a < 1000000", - "SELECT * FROM t WHERE a > 1000000", - "SELECT * FROM t WHERE a <= 1000000", - "SELECT * FROM t WHERE a >= 1000000", - "SELECT * FROM t WHERE a < 1000001", - "SELECT * FROM t WHERE a > 1000001", - "SELECT * FROM t WHERE a <= 1000001", - "SELECT * FROM t WHERE a >= 1000001", - "SELECT * FROM t WHERE a < 1000002", - "SELECT * FROM t WHERE a > 1000002", - "SELECT * FROM t WHERE a <= 1000002", - "SELECT * FROM t WHERE a >= 1000002", - "SELECT * FROM t 
WHERE a < 3000000", - "SELECT * FROM t WHERE a > 3000000", - "SELECT * FROM t WHERE a <= 3000000", - "SELECT * FROM t WHERE a >= 3000000", - "SELECT * FROM t WHERE a < 3000001", - "SELECT * FROM t WHERE a > 3000001", - "SELECT * FROM t WHERE a <= 3000001", - "SELECT * FROM t WHERE a >= 3000001", - "SELECT * FROM t WHERE a < 999997", - "SELECT * FROM t WHERE a > 999997", - "SELECT * FROM t WHERE a <= 999997", - "SELECT * FROM t WHERE a >= 999997", - "SELECT * FROM t WHERE a >= 999997 AND a <= 999999", - "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "SELECT * FROM t WHERE a > 999997 AND a < 999999", - "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "SELECT * FROM t WHERE a < 999998", - "SELECT * FROM t WHERE a > 999998", - "SELECT * FROM t WHERE a <= 999998", - "SELECT * FROM t WHERE a >= 999998", - "SELECT * FROM t WHERE a >= 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a > 999998 AND a < 1000000", - "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "SELECT * FROM t WHERE a < 999999", - "SELECT * FROM t WHERE a > 999999", - "SELECT * FROM t WHERE a <= 999999", - "SELECT * FROM t WHERE a >= 999999", - "SELECT * FROM t WHERE a >= 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a > 999999 AND a < 1000001", - "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "SELECT * FROM t WHERE a < 1000000", - "SELECT * FROM t WHERE a > 1000000", - "SELECT * FROM t WHERE a <= 1000000", - "SELECT * FROM t WHERE a >= 1000000", - "SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a < 1000002", - "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "SELECT * FROM t WHERE a < 1999997", - "SELECT * FROM t WHERE a > 1999997", - "SELECT * FROM t WHERE a <= 1999997", - "SELECT * FROM t WHERE a >= 1999997", - "SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a > 1999997 AND a < 1999999", - "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "SELECT * FROM t WHERE a < 1999998", - "SELECT * FROM t WHERE a > 1999998", - "SELECT * FROM t WHERE a <= 1999998", - "SELECT * FROM t WHERE a >= 1999998", - "SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a < 2000000", - "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "SELECT * FROM t WHERE a < 1999999", - "SELECT * FROM t WHERE a > 1999999", - "SELECT * FROM t WHERE a <= 1999999", - "SELECT * FROM t WHERE a >= 1999999", - "SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a < 2000001", - "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "SELECT * FROM t WHERE a < 2000000", - "SELECT * FROM t WHERE a > 2000000", - "SELECT * FROM t WHERE a <= 2000000", - "SELECT * FROM t WHERE a >= 2000000", - "SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a < 2000002", - "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "SELECT * FROM t WHERE a < 2999997", - "SELECT * FROM t WHERE a > 2999997", - "SELECT * FROM t WHERE a <= 2999997", - "SELECT * FROM t WHERE a >= 2999997", - "SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999", - "SELECT * 
FROM t WHERE a > 2999997 AND a <= 2999999", - "SELECT * FROM t WHERE a > 2999997 AND a < 2999999", - "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "SELECT * FROM t WHERE a < 2999998", - "SELECT * FROM t WHERE a > 2999998", - "SELECT * FROM t WHERE a <= 2999998", - "SELECT * FROM t WHERE a >= 2999998", - "SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a < 3000000", - "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "SELECT * FROM t WHERE a < 2999999", - "SELECT * FROM t WHERE a > 2999999", - "SELECT * FROM t WHERE a <= 2999999", - "SELECT * FROM t WHERE a >= 2999999", - "SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a < 3000001", - "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "SELECT * FROM t WHERE a < 3000000", - "SELECT * FROM t WHERE a > 3000000", - "SELECT * FROM t WHERE a <= 3000000", - "SELECT * FROM t WHERE a >= 3000000", - "SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a < 3000002", - "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002" - ] - }, - { - "name": "TestRangePartitionBoundariesLtS", - "cases": [ - "INSERT INTO t VALUES (0, '0 Filler...')", - "INSERT INTO t VALUES (1, '1 Filler...')", - "INSERT INTO t VALUES (2, '2 Filler...')", - "INSERT INTO t VALUES (3, '3 Filler...')", - "INSERT INTO t VALUES (4, '4 Filler...')", - "INSERT INTO t VALUES (5, '5 Filler...')", - "INSERT INTO t VALUES (6, '6 Filler...')", - "ANALYZE TABLE t", - "SELECT * FROM t WHERE a < -1", - "SELECT * FROM t WHERE a > -1", - "SELECT * FROM t WHERE a <= -1", - "SELECT * FROM t WHERE a >= -1", - "SELECT * FROM t WHERE a < 2 OR a > -1", - "SELECT * FROM t WHERE a > 2 AND a < -1", - "SELECT * FROM t WHERE NOT (a < 2 OR a > -1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < -1)", - "SELECT * FROM t WHERE a < 2 OR a >= -1", - "SELECT * FROM t WHERE a >= 2 AND a < -1", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= -1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < -1)", - "SELECT * FROM t WHERE a <= 2 OR a > -1", - "SELECT * FROM t WHERE a > 2 AND a <= -1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > -1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= -1)", - "SELECT * FROM t WHERE a <= 2 OR a >= -1", - "SELECT * FROM t WHERE a >= 2 AND a <= -1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1)", - "SELECT * FROM t WHERE a < 0", - "SELECT * FROM t WHERE a > 0", - "SELECT * FROM t WHERE a <= 0", - "SELECT * FROM t WHERE a >= 0", - "SELECT * FROM t WHERE a < 2 OR a > 0", - "SELECT * FROM t WHERE a > 2 AND a < 0", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 0)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 0)", - "SELECT * FROM t WHERE a < 2 OR a >= 0", - "SELECT * FROM t WHERE a >= 2 AND a < 0", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 0)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 0)", - "SELECT * FROM t WHERE a <= 2 OR a > 0", - "SELECT * FROM t WHERE a > 2 AND a <= 0", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 0)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 0)", - "SELECT * FROM t WHERE a <= 2 OR a >= 0", - "SELECT * FROM t WHERE a >= 2 AND a <= 0", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0)", - "SELECT * FROM t WHERE a < 1", - "SELECT * FROM t WHERE a > 1", - 
"SELECT * FROM t WHERE a <= 1", - "SELECT * FROM t WHERE a >= 1", - "SELECT * FROM t WHERE a < 2 OR a > 1", - "SELECT * FROM t WHERE a > 2 AND a < 1", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 1)", - "SELECT * FROM t WHERE a < 2 OR a >= 1", - "SELECT * FROM t WHERE a >= 2 AND a < 1", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 1)", - "SELECT * FROM t WHERE a <= 2 OR a > 1", - "SELECT * FROM t WHERE a > 2 AND a <= 1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 1)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 1)", - "SELECT * FROM t WHERE a <= 2 OR a >= 1", - "SELECT * FROM t WHERE a >= 2 AND a <= 1", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1)", - "SELECT * FROM t WHERE a < 2", - "SELECT * FROM t WHERE a > 2", - "SELECT * FROM t WHERE a <= 2", - "SELECT * FROM t WHERE a >= 2", - "SELECT * FROM t WHERE a < 2 OR a > 2", - "SELECT * FROM t WHERE a > 2 AND a < 2", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 2)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 2)", - "SELECT * FROM t WHERE a < 2 OR a >= 2", - "SELECT * FROM t WHERE a >= 2 AND a < 2", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 2)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 2)", - "SELECT * FROM t WHERE a <= 2 OR a > 2", - "SELECT * FROM t WHERE a > 2 AND a <= 2", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 2)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 2)", - "SELECT * FROM t WHERE a <= 2 OR a >= 2", - "SELECT * FROM t WHERE a >= 2 AND a <= 2", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2)", - "SELECT * FROM t WHERE a < 3", - "SELECT * FROM t WHERE a > 3", - "SELECT * FROM t WHERE a <= 3", - "SELECT * FROM t WHERE a >= 3", - "SELECT * FROM t WHERE a < 2 OR a > 3", - "SELECT * FROM t WHERE a > 2 AND a < 3", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 3)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 3)", - "SELECT * FROM t WHERE a < 2 OR a >= 3", - "SELECT * FROM t WHERE a >= 2 AND a < 3", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 3)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 3)", - "SELECT * FROM t WHERE a <= 2 OR a > 3", - "SELECT * FROM t WHERE a > 2 AND a <= 3", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 3)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 3)", - "SELECT * FROM t WHERE a <= 2 OR a >= 3", - "SELECT * FROM t WHERE a >= 2 AND a <= 3", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3)", - "SELECT * FROM t WHERE a < 4", - "SELECT * FROM t WHERE a > 4", - "SELECT * FROM t WHERE a <= 4", - "SELECT * FROM t WHERE a >= 4", - "SELECT * FROM t WHERE a < 2 OR a > 4", - "SELECT * FROM t WHERE a > 2 AND a < 4", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 4)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 4)", - "SELECT * FROM t WHERE a < 2 OR a >= 4", - "SELECT * FROM t WHERE a >= 2 AND a < 4", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 4)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 4)", - "SELECT * FROM t WHERE a <= 2 OR a > 4", - "SELECT * FROM t WHERE a > 2 AND a <= 4", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 4)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 4)", - "SELECT * FROM t WHERE a <= 2 OR a >= 4", - "SELECT * FROM t WHERE a >= 2 AND a <= 4", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4)", - "SELECT * FROM t WHERE a < 5", - "SELECT * FROM t WHERE a > 5", - "SELECT * FROM t WHERE a <= 5", - "SELECT 
* FROM t WHERE a >= 5", - "SELECT * FROM t WHERE a < 2 OR a > 5", - "SELECT * FROM t WHERE a > 2 AND a < 5", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 5)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 5)", - "SELECT * FROM t WHERE a < 2 OR a >= 5", - "SELECT * FROM t WHERE a >= 2 AND a < 5", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 5)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 5)", - "SELECT * FROM t WHERE a <= 2 OR a > 5", - "SELECT * FROM t WHERE a > 2 AND a <= 5", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 5)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 5)", - "SELECT * FROM t WHERE a <= 2 OR a >= 5", - "SELECT * FROM t WHERE a >= 2 AND a <= 5", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5)", - "SELECT * FROM t WHERE a < 6", - "SELECT * FROM t WHERE a > 6", - "SELECT * FROM t WHERE a <= 6", - "SELECT * FROM t WHERE a >= 6", - "SELECT * FROM t WHERE a < 2 OR a > 6", - "SELECT * FROM t WHERE a > 2 AND a < 6", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 6)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 6)", - "SELECT * FROM t WHERE a < 2 OR a >= 6", - "SELECT * FROM t WHERE a >= 2 AND a < 6", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 6)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 6)", - "SELECT * FROM t WHERE a <= 2 OR a > 6", - "SELECT * FROM t WHERE a > 2 AND a <= 6", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 6)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 6)", - "SELECT * FROM t WHERE a <= 2 OR a >= 6", - "SELECT * FROM t WHERE a >= 2 AND a <= 6", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6)", - "SELECT * FROM t WHERE a < 7", - "SELECT * FROM t WHERE a > 7", - "SELECT * FROM t WHERE a <= 7", - "SELECT * FROM t WHERE a >= 7", - "SELECT * FROM t WHERE a < 2 OR a > 7", - "SELECT * FROM t WHERE a > 2 AND a < 7", - "SELECT * FROM t WHERE NOT (a < 2 OR a > 7)", - "SELECT * FROM t WHERE NOT (a > 2 AND a < 7)", - "SELECT * FROM t WHERE a < 2 OR a >= 7", - "SELECT * FROM t WHERE a >= 2 AND a < 7", - "SELECT * FROM t WHERE NOT (a < 2 OR a >= 7)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a < 7)", - "SELECT * FROM t WHERE a <= 2 OR a > 7", - "SELECT * FROM t WHERE a > 2 AND a <= 7", - "SELECT * FROM t WHERE NOT (a <= 2 OR a > 7)", - "SELECT * FROM t WHERE NOT (a > 2 AND a <= 7)", - "SELECT * FROM t WHERE a <= 2 OR a >= 7", - "SELECT * FROM t WHERE a >= 2 AND a <= 7", - "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7)", - "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7)" - ] - } -] diff --git a/pkg/executor/testdata/executor_suite_out.json b/pkg/executor/testdata/executor_suite_out.json deleted file mode 100644 index 3ed1e0bfd0868..0000000000000 --- a/pkg/executor/testdata/executor_suite_out.json +++ /dev/null @@ -1,5773 +0,0 @@ -[ - { - "Name": "TestRangePartitionBoundariesEq", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - 
"Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a = -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483648)", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (0)", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 999998", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999998)", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999999)", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000000", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000000)", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000001)", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000002)", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a = 3000000", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000000)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a = 3000001", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000001)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483648, -2147483647)", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (-2147483647, -2147483646)", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999997, 999998, 999999)", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999998, 999999, 1000000)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (999999, 1000000, 1000001)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002)", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999)", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000)", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001)", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002)", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999)", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000)", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001)", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002)", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesNe", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a != -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a != 0", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0)", - "Plan": [ - "all" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1", - "Plan": [ - "all" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1)", - "Plan": [ - "all" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2", - "Plan": [ - "all" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2)", - "Plan": [ - "all" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 3", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3", - "Plan": [ - "all" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3)", - "Plan": [ - "all" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 4", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4", - "Plan": [ - "all" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4)", - "Plan": [ - "all" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 5", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5", - "Plan": [ - "all" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5)", - "Plan": [ - "all" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6)", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a != 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7)", - "Plan": [ - "all" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - } - ] - }, - { - "Name": "TestRangePartitionBoundariesBetweenM", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 2", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 10", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999998", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999997", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999998", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999998", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000", - "Plan": [ - "p1" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001", - "Plan": [ - "p1" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012", - "Plan": [ - "p1" - ], - "Res": [ - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesBetweenS", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 
Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN -1 AND 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 0 AND 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 1 AND 4", - "Plan": [ - "p1 p2 p3 p4" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 2", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 3", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 3 AND 4", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 4 AND 4", - "Plan": [ - "p4" - ], - "Res": [ - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 5", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 5 AND 4", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 6", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 6 AND 4", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 2 AND 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a BETWEEN 7 AND 4", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesLtM", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < -2147483648", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > -2147483648", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= -2147483648", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= -2147483648", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 0", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 0", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 0", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000002", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000002", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000001", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000001", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 999997", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999997", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999997", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999997 AND a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999998", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999998 AND a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 999999", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999", - "Plan": [ - "all" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 999999 AND a <= 1000001", - "Plan": [ - "p0 p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a < 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 999999 AND a <= 1000001", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1000000", - "Plan": [ - "p0" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a < 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1000000 AND a <= 1000002", - "Plan": [ - "p1" - ], - "Res": [ - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999997", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999997", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a < 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999997 AND a <= 1999999", - "Plan": [ - "p1" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999998", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999998", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a < 2000000", - "Plan": [ - "p1" - ], - "Res": [ - "1999999 1999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999998 AND a <= 2000000", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1999999", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999999", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001", - "Plan": [ - "p1 p2" - ], - "Res": [ - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a < 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1999999 AND a <= 2000001", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2000000", - "Plan": [ - "p0 p1" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2000000", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a < 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2000000 AND a <= 2000002", - "Plan": [ - "p2" - ], - "Res": [ - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999997", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999997", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999997", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a < 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999997 AND a <= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999998", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999998", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999998", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a < 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999998 AND a <= 3000000", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2999999", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2999999", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999999", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001", - "Plan": [ - "p2" - ], - "Res": [ - "2999999 2999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a < 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 2999999 AND a <= 3000001", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3000000", - "Plan": [ - "all" - ], - "Res": [ - "-2147483648 MIN_INT filler...", - "0 0 Filler...", - "1000000 1000000 Filler ...", - "1000001 1000001 Filler ...", - "1000002 1000002 Filler ...", - "1999998 1999998 Filler ...", - "1999999 1999999 Filler ...", - "2000000 2000000 Filler ...", - "2000001 2000001 Filler ...", - "2000002 2000002 Filler ...", - "2999998 2999998 Filler ...", - "2999999 2999999 Filler ...", - "999998 999998 Filler ...", - "999999 999999 Filler ..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a < 3000002", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 3000000 AND a <= 3000002", - "Plan": [ - "dual" - ], - "Res": null - } - ] - }, - { - "Name": "TestRangePartitionBoundariesLtS", - "Cases": [ - { - "SQL": "INSERT INTO t VALUES (0, '0 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (1, '1 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (2, '2 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (3, '3 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (4, '4 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (5, '5 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "INSERT INTO t VALUES (6, '6 Filler...')", - "Plan": null, - "Res": null - }, - { - "SQL": "ANALYZE TABLE t", - "Plan": null, - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= -1", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= -1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= -1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 0", - "Plan": [ - "p0" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a > 0", - "Plan": [ - "p1 p2 p3 p4 p5 p6" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 0", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 0", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 0", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 1", - "Plan": [ - "p0" - ], - "Res": [ - "0 0 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 1", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 1", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 1", - "Plan": [ - "p1 p2 p3 p4 p5 p6" - ], - "Res": [ - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 1", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 1", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 2", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 2)", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 2", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 2)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 2", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 2", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2)", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 3", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 3", - "Plan": [ - "p4 p5 p6" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 3", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 3", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 3", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 3", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 3)", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 3)", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 3", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 3", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 3)", - "Plan": [ - "p2" - ], - "Res": [ - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 3)", - "Plan": [ - "p0 p1 p3 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 3", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 3", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 3)", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 3)", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 3", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 3", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3)", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3)", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 4", - "Plan": [ - "p0 p1 p2 p3" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 4", - "Plan": [ - "p5 p6" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 4", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 4", - "Plan": [ - "p4 p5 p6" - ], - "Res": [ - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 4", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 4", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 4)", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 4)", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 4", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 4", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 4)", - "Plan": [ - "p2 p3" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 4)", - "Plan": [ - "p0 p1 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 4", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 4", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 4)", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 4)", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 4", - "Plan": [ - "p0 p1 p2 p4 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 4", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4)", - "Plan": [ - "p3" - ], - "Res": [ - "3 3 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4)", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 5", - "Plan": [ - "p0 p1 p2 p3 p4" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 5", - "Plan": [ - "p6" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 5", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 5", - "Plan": [ - "p5 p6" - ], - "Res": [ - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 5", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 5", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 5)", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 5)", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 5", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 5", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 5)", - "Plan": [ - "p2 p3 p4" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 5)", - "Plan": [ - "p0 p1 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 5", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 5", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 5)", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 5)", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 5", - "Plan": [ - "p0 p1 p2 p5 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 5", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5)", - "Plan": [ - "p3 p4" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5)", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 6", - "Plan": [ - "p0 p1 p2 p3 p4 p5" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 6", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 6", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 6", - "Plan": [ - "p6" - ], - "Res": [ - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 6", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 6", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 6)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 6)", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 6", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 6", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 6)", - "Plan": [ - "p2 p3 p4 p5" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 6)", - "Plan": [ - "p0 p1 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 6", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 6", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 6)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 6)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 6", - "Plan": [ - "p0 p1 p2 p6" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 6", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6)", - "Plan": [ - "p3 p4 p5" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 7", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a <= 7", - "Plan": [ - "all" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 7", - "Plan": [ - "dual" - ], - "Res": null - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a > 7", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a < 7", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a > 7)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a < 7)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a < 2 OR a >= 7", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a < 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." 
- ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a < 2 OR a >= 7)", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a < 7)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a > 7", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a > 2 AND a <= 7", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a > 7)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a > 2 AND a <= 7)", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a <= 2 OR a >= 7", - "Plan": [ - "p0 p1 p2" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler...", - "2 2 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE a >= 2 AND a <= 7", - "Plan": [ - "p2 p3 p4 p5 p6" - ], - "Res": [ - "2 2 Filler...", - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7)", - "Plan": [ - "p3 p4 p5 p6" - ], - "Res": [ - "3 3 Filler...", - "4 4 Filler...", - "5 5 Filler...", - "6 6 Filler..." - ] - }, - { - "SQL": "SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7)", - "Plan": [ - "p0 p1" - ], - "Res": [ - "0 0 Filler...", - "1 1 Filler..." - ] - } - ] - } -] diff --git a/pkg/executor/union_scan.go b/pkg/executor/union_scan.go index 83d3cb06ddb98..947e7c9589068 100644 --- a/pkg/executor/union_scan.go +++ b/pkg/executor/union_scan.go @@ -298,7 +298,7 @@ func (ce compareExec) compare(sctx *stmtctx.StatementContext, a, b []types.Datum for _, colOff := range ce.usedIndex { aColumn := a[colOff] bColumn := b[colOff] - cmp, err = aColumn.Compare(sctx, &bColumn, ce.collators[colOff]) + cmp, err = aColumn.Compare(sctx.TypeCtx(), &bColumn, ce.collators[colOff]) if err != nil { return 0, err } diff --git a/pkg/executor/update_test.go b/pkg/executor/update_test.go index fdbc1590e6933..03586f8225c84 100644 --- a/pkg/executor/update_test.go +++ b/pkg/executor/update_test.go @@ -20,7 +20,6 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/kv" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" @@ -28,407 +27,6 @@ import ( "github.com/stretchr/testify/require" ) -func TestUpdateGenColInTxn(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`create table t(a bigint, b bigint as (a+1));`) - tk.MustExec(`begin;`) - tk.MustExec(`insert into t(a) values(1);`) - err := tk.ExecToErr(`update t set b=6 where b=2;`) - require.Equal( - t, - "[planner:3105]The value specified for generated column 'b' in table 't' is not allowed.", - err.Error(), - ) - tk.MustExec(`commit;`) - tk.MustQuery(`select * from t;`).Check( - testkit.Rows( - `1 2`, - ), - ) -} - -func TestUpdateWithAutoidSchema(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`create table t1(id int primary key auto_increment, n 
int);`) - tk.MustExec(`create table t2(id int primary key, n float auto_increment, key I_n(n));`) - tk.MustExec(`create table t3(id int primary key, n double auto_increment, key I_n(n));`) - - tests := []struct { - exec string - query string - result [][]interface{} - }{ - { - `insert into t1 set n = 1`, - `select * from t1 where id = 1`, - testkit.Rows(`1 1`), - }, - { - `update t1 set id = id+1`, - `select * from t1 where id = 2`, - testkit.Rows(`2 1`), - }, - { - `insert into t1 set n = 2`, - `select * from t1 where id = 3`, - testkit.Rows(`3 2`), - }, - { - `update t1 set id = id + '1.1' where id = 3`, - `select * from t1 where id = 4`, - testkit.Rows(`4 2`), - }, - { - `insert into t1 set n = 3`, - `select * from t1 where id = 5`, - testkit.Rows(`5 3`), - }, - { - `update t1 set id = id + '0.5' where id = 5`, - `select * from t1 where id = 6`, - testkit.Rows(`6 3`), - }, - { - `insert into t1 set n = 4`, - `select * from t1 where id = 7`, - testkit.Rows(`7 4`), - }, - { - `insert into t2 set id = 1`, - `select * from t2 where id = 1`, - testkit.Rows(`1 1`), - }, - { - `update t2 set n = n+1`, - `select * from t2 where id = 1`, - testkit.Rows(`1 2`), - }, - { - `insert into t2 set id = 2`, - `select * from t2 where id = 2`, - testkit.Rows(`2 3`), - }, - { - `update t2 set n = n + '2.2'`, - `select * from t2 where id = 2`, - testkit.Rows(`2 5.2`), - }, - { - `insert into t2 set id = 3`, - `select * from t2 where id = 3`, - testkit.Rows(`3 6`), - }, - { - `update t2 set n = n + '0.5' where id = 3`, - `select * from t2 where id = 3`, - testkit.Rows(`3 6.5`), - }, - { - `insert into t2 set id = 4`, - `select * from t2 where id = 4`, - testkit.Rows(`4 7`), - }, - { - `insert into t3 set id = 1`, - `select * from t3 where id = 1`, - testkit.Rows(`1 1`), - }, - { - `update t3 set n = n+1`, - `select * from t3 where id = 1`, - testkit.Rows(`1 2`), - }, - { - `insert into t3 set id = 2`, - `select * from t3 where id = 2`, - testkit.Rows(`2 3`), - }, - { - `update t3 set n = n + '3.3'`, - `select * from t3 where id = 2`, - testkit.Rows(`2 6.3`), - }, - { - `insert into t3 set id = 3`, - `select * from t3 where id = 3`, - testkit.Rows(`3 7`), - }, - { - `update t3 set n = n + '0.5' where id = 3`, - `select * from t3 where id = 3`, - testkit.Rows(`3 7.5`), - }, - { - `insert into t3 set id = 4`, - `select * from t3 where id = 4`, - testkit.Rows(`4 8`), - }, - } - - for _, tt := range tests { - tk.MustExec(tt.exec) - tk.MustQuery(tt.query).Check(tt.result) - } -} - -func TestUpdateSchemaChange(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`create table t(a bigint, b bigint as (a+1));`) - tk.MustExec(`begin;`) - tk.MustExec(`insert into t(a) values(1);`) - err := tk.ExecToErr(`update t set b=6 where b=2;`) - require.Equal( - t, - "[planner:3105]The value specified for generated column 'b' in table 't' is not allowed.", - err.Error(), - ) - tk.MustExec(`commit;`) - tk.MustQuery(`select * from t;`).Check( - testkit.Rows( - `1 2`, - ), - ) -} - -func TestUpdateMultiDatabaseTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop database if exists test2") - tk.MustExec("create database test2") - tk.MustExec("create table t(a int, b int generated always as (a+1) virtual)") - tk.MustExec("create table test2.t(a int, b int generated always as (a+1) virtual)") - tk.MustExec("update t, test2.t set test.t.a=1") -} - -func 
TestUpdateSwapColumnValues(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t1, t2") - tk.MustExec("create table t1 (c_str varchar(40))") - tk.MustExec("create table t2 (c_str varchar(40))") - tk.MustExec("insert into t1 values ('Alice')") - tk.MustExec("insert into t2 values ('Bob')") - tk.MustQuery("select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str").Check(testkit.Rows("Alice Bob")) - tk.MustExec("update t1, t2 set t1.c_str = t2.c_str, t2.c_str = t1.c_str where t1.c_str <= t2.c_str") - tk.MustQuery("select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str").Check(testkit.Rows()) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values(1, 2)") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2")) - tk.MustExec("update t set a=b, b=a") - tk.MustQuery("select * from t").Check(testkit.Rows("2 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int)") - tk.MustExec("insert into t values (1,3)") - tk.MustQuery("select * from t").Check(testkit.Rows("1 3")) - tk.MustExec("update t set a=b, b=a") - tk.MustQuery("select * from t").Check(testkit.Rows("3 1")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, c int as (-a) virtual, d int as (-b) stored)") - tk.MustExec("insert into t(a, b) values (10, 11), (20, 22)") - tk.MustQuery("select * from t").Check(testkit.Rows("10 11 -10 -11", "20 22 -20 -22")) - tk.MustExec("update t set a=b, b=a") - tk.MustQuery("select * from t").Check(testkit.Rows("11 10 -11 -10", "22 20 -22 -20")) - tk.MustExec("update t set b=30, a=b") - tk.MustQuery("select * from t").Check(testkit.Rows("10 30 -10 -30", "20 30 -20 -30")) -} - -func TestMultiUpdateOnSameTable(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(x int, y int)") - tk.MustExec("insert into t values()") - tk.MustExec("update t t1, t t2 set t2.y=1, t1.x=2") - tk.MustQuery("select * from t").Check(testkit.Rows("2 1")) - tk.MustExec("update t t1, t t2 set t1.x=t2.y, t2.y=t1.x") - tk.MustQuery("select * from t").Check(testkit.Rows("1 2")) - - // Update generated columns - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(x int, y int, z int as (x+10) stored, w int as (y-10) virtual)") - tk.MustExec("insert into t(x, y) values(1, 2), (3, 4)") - tk.MustExec("update t t1, t t2 set t2.y=1, t1.x=2 where t1.x=1") - tk.MustQuery("select * from t").Check(testkit.Rows("2 1 12 -9", "3 1 13 -9")) - - tk.MustExec("update t t1, t t2 set t1.x=5, t2.y=t1.x where t1.x=3") - tk.MustQuery("select * from t").Check(testkit.Rows("2 3 12 -7", "5 3 15 -7")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, b int, c int as (a+b) stored)") - tk.MustExec("insert into t(a, b) values (1, 2)") - tk.MustExec("update t t1, t t2 set t2.a=3") - tk.MustQuery("select * from t").Check(testkit.Rows("3 2 5")) - - tk.MustExec("update t t1, t t2 set t1.a=4, t2.b=5") - tk.MustQuery("select * from t").Check(testkit.Rows("4 5 9")) - - // Update primary keys - tk.MustExec("use test") - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int primary key)") - tk.MustExec("insert into t values (1), (2)") - tk.MustExec("update t set a=a+2") - tk.MustQuery("select * from 
t").Check(testkit.Rows("3", "4")) - tk.MustExec("update t m, t n set m.a = n.a+10 where m.a=n.a") - tk.MustQuery("select * from t").Check(testkit.Rows("13", "14")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int primary key, b int)") - tk.MustExec("insert into t values (1,3), (2,4)") - tk.MustGetErrMsg( - "update t m, t n set m.a = n.a+10, n.b = m.b+1 where m.a=n.a", - `[planner:1706]Primary key/partition key update is not allowed since the table is updated both as 'm' and 'n'.`, - ) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t (a int, b int, c int, primary key(a, b))") - tk.MustExec("insert into t values (1,3,5), (2,4,6)") - tk.MustExec("update t m, t n set m.a = n.a+10, m.b = n.b+10 where m.a=n.a") - tk.MustQuery("select * from t").Check(testkit.Rows("11 13 5", "12 14 6")) - tk.MustExec("update t m, t n, t q set q.c=m.a+n.b, n.c = m.a+1, m.c = n.b+1 where m.b=n.b AND m.a=q.a") - tk.MustQuery("select * from t").Check(testkit.Rows("11 13 24", "12 14 26")) - tk.MustGetErrMsg( - "update t m, t n, t q set m.a = m.a+1, n.c = n.c-1, q.c = q.a+q.b where m.b=n.b and n.b=q.b", - `[planner:1706]Primary key/partition key update is not allowed since the table is updated both as 'm' and 'n'.`, - ) -} - -func TestUpdateClusterIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into t(id, v) values ('abc', 233)`) - tk.MustQuery(`select id, v from t where id = 'abc'`).Check(testkit.Rows("abc 233")) - tk.MustExec(`update t set id = 'dfg' where id = 'abc'`) - tk.MustQuery(`select * from t`).Check(testkit.Rows("dfg 233")) - tk.MustExec(`update t set id = 'aaa', v = 333 where id = 'dfg'`) - tk.MustQuery(`select * from t where id = 'aaa'`).Check(testkit.Rows("aaa 333")) - tk.MustExec(`update t set v = 222 where id = 'aaa'`) - tk.MustQuery(`select * from t where id = 'aaa'`).Check(testkit.Rows("aaa 222")) - tk.MustExec(`insert into t(id, v) values ('bbb', 111)`) - tk.MustGetErrCode(`update t set id = 'bbb' where id = 'aaa'`, errno.ErrDupEntry) - - tk.MustExec(`drop table if exists ut3pk`) - tk.MustExec(`create table ut3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3))`) - tk.MustExec(`insert into ut3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111)`) - tk.MustQuery(`select id1, id2, id3, v from ut3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111`).Check(testkit.Rows("aaa bbb 111 233")) - tk.MustExec(`update ut3pk set id1 = 'abc', id2 = 'bbb2', id3 = 222, v = 555 where id1 = 'aaa' and id2 = 'bbb' and id3 = 111`) - tk.MustQuery(`select id1, id2, id3, v from ut3pk where id1 = 'abc' and id2 = 'bbb2' and id3 = 222`).Check(testkit.Rows("abc bbb2 222 555")) - tk.MustQuery(`select id1, id2, id3, v from ut3pk`).Check(testkit.Rows("abc bbb2 222 555")) - tk.MustExec(`update ut3pk set v = 666 where id1 = 'abc' and id2 = 'bbb2' and id3 = 222`) - tk.MustQuery(`select id1, id2, id3, v from ut3pk`).Check(testkit.Rows("abc bbb2 222 666")) - tk.MustExec(`insert into ut3pk(id1, id2, id3, v) values ('abc', 'bbb3', 222, 777)`) - tk.MustGetErrCode( - `update ut3pk set id2 = 'bbb3' where id1 = 'abc' and id2 = 'bbb2' and id3 = 222`, - errno.ErrDupEntry, - ) - - tk.MustExec(`drop table if exists ut1pku`) - tk.MustExec(`create table ut1pku(id 
varchar(200) primary key, uk int, v int, unique key ukk(uk))`) - tk.MustExec(`insert into ut1pku(id, uk, v) values('a', 1, 2), ('b', 2, 3)`) - tk.MustQuery(`select * from ut1pku`).Check(testkit.Rows("a 1 2", "b 2 3")) - tk.MustExec(`update ut1pku set uk = 3 where id = 'a'`) - tk.MustQuery(`select * from ut1pku`).Check(testkit.Rows("a 3 2", "b 2 3")) - tk.MustGetErrCode(`update ut1pku set uk = 2 where id = 'a'`, errno.ErrDupEntry) - tk.MustQuery(`select * from ut1pku`).Check(testkit.Rows("a 3 2", "b 2 3")) - - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a char(10) primary key, b char(10));") - tk.MustExec("insert into t values('a', 'b');") - tk.MustExec("update t set a='c' where t.a='a' and b='b';") - tk.MustQuery("select * from t").Check(testkit.Rows("c b")) - - tk.MustExec("drop table if exists s") - tk.MustExec("create table s (a int, b int, c int, primary key (a, b))") - tk.MustExec("insert s values (3, 3, 3), (5, 5, 5)") - tk.MustExec("update s set c = 10 where a = 3") - tk.MustQuery("select * from s").Check(testkit.Rows("3 3 10", "5 5 5")) -} - -func TestDeleteClusterIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t(id varchar(200) primary key, v int)`) - tk.MustExec(`insert into t(id, v) values ('abc', 233)`) - tk.MustExec(`delete from t where id = 'abc'`) - tk.MustQuery(`select * from t`).Check(testkit.Rows()) - tk.MustQuery(`select * from t where id = 'abc'`).Check(testkit.Rows()) - - tk.MustExec(`drop table if exists it3pk`) - tk.MustExec(`create table it3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3))`) - tk.MustExec(`insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111)`) - tk.MustExec(`delete from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111`) - tk.MustQuery(`select * from it3pk`).Check(testkit.Rows()) - tk.MustQuery(`select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111`).Check(testkit.Rows()) - tk.MustExec(`insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 433, 111)`) - tk.MustQuery(`select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111`).Check(testkit.Rows("aaa bbb 433 111")) - - tk.MustExec(`drop table if exists dt3pku`) - tk.MustExec(`create table dt3pku(id varchar(200) primary key, uk int, v int, unique key uuk(uk))`) - tk.MustExec(`insert into dt3pku(id, uk, v) values('a', 1, 2)`) - tk.MustExec(`delete from dt3pku where id = 'a'`) - tk.MustQuery(`select * from dt3pku`).Check(testkit.Rows()) - tk.MustExec(`insert into dt3pku(id, uk, v) values('a', 1, 2)`) - - tk.MustExec("drop table if exists s1") - tk.MustExec("create table s1 (a int, b int, c int, primary key (a, b))") - tk.MustExec("insert s1 values (3, 3, 3), (5, 5, 5)") - tk.MustExec("delete from s1 where a = 3") - tk.MustQuery("select * from s1").Check(testkit.Rows("5 5 5")) -} - -func TestReplaceClusterIndex(t *testing.T) { - store := testkit.CreateMockStore(t) - - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn - - tk.MustExec(`drop table if exists rt1pk`) - tk.MustExec(`create table rt1pk(id varchar(200) primary key, v int)`) - tk.MustExec(`replace into rt1pk(id, v) values('abc', 1)`) - tk.MustQuery(`select * from rt1pk`).Check(testkit.Rows("abc 1")) - 
tk.MustExec(`replace into rt1pk(id, v) values('bbb', 233), ('abc', 2)`) - tk.MustQuery(`select * from rt1pk`).Check(testkit.Rows("abc 2", "bbb 233")) - - tk.MustExec(`drop table if exists rt3pk`) - tk.MustExec(`create table rt3pk(id1 timestamp, id2 time, v int, id3 year, primary key(id1, id2, id3))`) - tk.MustExec(`replace into rt3pk(id1, id2,id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 1)`) - tk.MustQuery(`select * from rt3pk`).Check(testkit.Rows("2018-01-01 11:11:11 22:22:22 1 2019")) - tk.MustExec(`replace into rt3pk(id1, id2, id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 2)`) - tk.MustQuery(`select * from rt3pk`).Check(testkit.Rows("2018-01-01 11:11:11 22:22:22 2 2019")) - - tk.MustExec(`drop table if exists rt1pk1u`) - tk.MustExec(`create table rt1pk1u(id varchar(200) primary key, uk int, v int, unique key uuk(uk))`) - tk.MustExec(`replace into rt1pk1u(id, uk, v) values("abc", 2, 1)`) - tk.MustQuery(`select * from rt1pk1u`).Check(testkit.Rows("abc 2 1")) - tk.MustExec(`replace into rt1pk1u(id, uk, v) values("aaa", 2, 11)`) - tk.MustQuery(`select * from rt1pk1u`).Check(testkit.Rows("aaa 2 11")) -} - func TestPessimisticUpdatePKLazyCheck(t *testing.T) { store := testkit.CreateMockStore(t) @@ -471,19 +69,6 @@ func getPresumeExistsCount(t *testing.T, se session.Session) int { return presumeNotExistsCnt } -func TestOutOfRangeWithUnsigned(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists t`) - tk.MustExec(`create table t(ts int(10) unsigned NULL DEFAULT NULL)`) - tk.MustExec(`insert into t values(1)`) - tk.MustGetErrMsg( - "update t set ts = IF(ts < (0 - ts), 1,1) where ts>0", - "[types:1690]BIGINT UNSIGNED value is out of range in '(0 - test.t.ts)'", - ) -} - func TestIssue21447(t *testing.T) { store := testkit.CreateMockStore(t) @@ -510,29 +95,6 @@ func TestIssue21447(t *testing.T) { tk1.MustExec("commit") } -func TestIssue23553(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec(`drop table if exists tt`) - tk.MustExec(`create table tt (m0 varchar(64), status tinyint not null)`) - tk.MustExec(`insert into tt values('1',0),('1',0),('1',0)`) - tk.MustExec(`update tt a inner join (select m0 from tt where status!=1 group by m0 having count(*)>1) b on a.m0=b.m0 set a.status=1`) -} - -// see issue https://github.com/pingcap/tidb/issues/47816 -func TestUpdateUnsignedWithOverflow(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec(`use test`) - tk.MustExec("create table t1(id int, a int unsigned)") - tk.MustExec("set sql_mode=''") - tk.MustExec("insert into t1 values(1, 10), (2, 20)") - tk.MustExec("update t1 set a='-1' where id=1") - tk.MustExec("update t1 set a='1000000000000000000' where id=2") - tk.MustQuery("select id, a from t1 order by id asc").Check(testkit.Rows("1 0", "2 4294967295")) -} - func TestLockUnchangedUniqueKeys(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/pkg/executor/window_test.go b/pkg/executor/window_test.go index 3404acfea4d61..57094f5fc8ec3 100644 --- a/pkg/executor/window_test.go +++ b/pkg/executor/window_test.go @@ -28,9 +28,6 @@ func TestWindowFunctions(t *testing.T) { tk := testkit.NewTestKit(t, store) tk.MustExec("set @@tidb_window_concurrency = 1") tk.MustExec("set @@tidb_enable_pipelined_window_function = 0") - defer func() { - tk.MustExec("set @@tidb_enable_pipelined_window_function=1;") 
- }() doTestWindowFunctions(tk) // TestWindowParallelFunctions @@ -244,34 +241,18 @@ func TestSlidingWindowFunctions(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test;") - tk.MustExec("set @@tidb_enable_pipelined_window_function=0;") - defer func() { - tk.MustExec("set @@tidb_enable_pipelined_window_function=1;") - }() - idTypes := []string{"FLOAT", "DOUBLE"} - useHighPrecisions := []string{"ON", "OFF"} - for _, idType := range idTypes { - for _, useHighPrecision := range useHighPrecisions { - tk.MustExec("drop table if exists t;") - tk.MustExec(fmt.Sprintf("CREATE TABLE t (id %s, sex CHAR(1));", idType)) - tk.MustExec(fmt.Sprintf("SET SESSION windowing_use_high_precision = %s;", useHighPrecision)) - baseTestSlidingWindowFunctions(tk) - } - } -} - -func TestPipelinedSlidingWindowFunctions(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test;") + enablePipeLined := []string{"0", "1"} idTypes := []string{"FLOAT", "DOUBLE"} useHighPrecisions := []string{"ON", "OFF"} - for _, idType := range idTypes { - for _, useHighPrecision := range useHighPrecisions { - tk.MustExec("drop table if exists t;") - tk.MustExec(fmt.Sprintf("CREATE TABLE t (id %s, sex CHAR(1));", idType)) - tk.MustExec(fmt.Sprintf("SET SESSION windowing_use_high_precision = %s;", useHighPrecision)) - baseTestSlidingWindowFunctions(tk) + for _, enabled := range enablePipeLined { + tk.MustExec(fmt.Sprintf("set @@tidb_enable_pipelined_window_function=%s;", enabled)) + for _, idType := range idTypes { + for _, useHighPrecision := range useHighPrecisions { + tk.MustExec("drop table if exists t;") + tk.MustExec(fmt.Sprintf("CREATE TABLE t (id %s, sex CHAR(1));", idType)) + tk.MustExec(fmt.Sprintf("SET SESSION windowing_use_high_precision = %s;", useHighPrecision)) + baseTestSlidingWindowFunctions(tk) + } } } } @@ -443,56 +424,6 @@ func baseTestSlidingWindowFunctions(tk *testkit.TestKit) { Check(testkit.Rows(" 11", " 11", "M 5", "F 5", "F 4", "F 3", "M 2")) } -func TestIssue24264(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec("drop table if exists tbl_2") - tk.MustExec("create table tbl_2 ( col_10 char(65) collate utf8mb4_unicode_ci not null , col_11 bigint not null , col_12 datetime not null , col_13 bigint unsigned default 327695751717730004 , col_14 timestamp default '2010-11-18' not null , primary key idx_5 ( col_11,col_13 ) /*T![clustered_index] clustered */ , unique key idx_6 ( col_10,col_11,col_13 ) , unique key idx_7 ( col_14,col_12,col_13 ) )") - tk.MustExec("insert into tbl_2 values ( 'RmF',-5353757041350034197,'1996-01-22',1866803697729291364,'1996-09-11' )") - tk.MustExec("insert into tbl_2 values ( 'xEOGaB',-6602924241498980347,'2019-02-22',8297270320597030697,'1972-04-04' )") - tk.MustExec("insert into tbl_2 values ( 'dvUztqgTPAhLdzgEsV',3316448219481769821,'2034-09-12',937089564901142512,'2030-12-04' )") - tk.MustExec("insert into tbl_2 values ( 'mNoyfbT',-6027094365061219400,'2035-10-10',1752804734961508175,'1992-08-09' )") - tk.MustExec("insert into tbl_2 values ( 'BDPJMhLYXuKB',6823702503458376955,'2015-04-09',737914379167848827,'2026-04-29' )") - tk.MustExec("insert into tbl_2 values ( 'WPiaVfPstGohvHd',1308183537252932688,'2020-05-03',5364104746649397703,'1979-01-28' )") - tk.MustExec("insert into tbl_2 values ( 'lrm',4642935044097656317,'1973-04-29',149081313305673035,'2013-02-03' )") - tk.MustExec("insert into 
tbl_2 values ( '',-7361040853169906422,'2024-10-22',6308270832310351889,'1981-02-01' )") - tk.MustExec("insert into tbl_2 values ( 'uDANahGcLwpSssabD',2235074865448210231,'1992-10-10',7140606140672586593,'1992-11-25' )") - tk.MustExec("insert into tbl_2 values ( 'TDH',-1911014243756021618,'2013-01-26',2022218243939205750,'1982-04-04' )") - tk.MustQuery("select lead(col_13,1,NULL) over w from tbl_2 window w as (order by col_13)").Check(testkit.Rows( - "737914379167848827", - "937089564901142512", - "1752804734961508175", - "1866803697729291364", - "2022218243939205750", - "5364104746649397703", - "6308270832310351889", - "7140606140672586593", - "8297270320597030697", - "")) -} - -func TestIssue29947(t *testing.T) { - store := testkit.CreateMockStore(t) - tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - tk.MustExec(`drop table if exists t_tir89b, t_vejdy`) - - tk.MustExec("CREATE TABLE `t_tir89b` (`c_3pcik` int(11) DEFAULT NULL,`c_0b6nxb` text DEFAULT NULL,`c_qytrlc` double NOT NULL,`c_sroc_c` int(11) DEFAULT NULL,PRIMARY KEY (`c_qytrlc`) /*T![clustered_index] NONCLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") - - tk.MustExec(`INSERT INTO t_tir89b VALUES (66,'cjd1o',87.77,NULL),(134217728,'d_unpd',76.66,NULL),(50,'_13gs',1.46,32),(49,'xclvsc',64.7,48),(7,'1an13',70.86,7),(29,NULL,6.26,6),(8,'hc485b',47.44,2),(84,'d_nlmd',99.3,76),(14,'lbny1c',61.1,47),(45,'9r5bid',25.37,95),(49,'jbz5r',72.99,49),(18,'uode3d',7.21,992),(-8945040,'ftrtib',47.47,20),(29,'algrj',6.28,24),(96,NULL,67.83,24),(5,'s1gfz',89.18,78),(74,'ggqbl',83.89,68),(61,'5n1q7',26.92,6),(10,'4gflb',33.84,28),(48,'xoe0cd',84.71,77),(6,'xkh6i',53.83,19),(5,NULL,89.1,46),(49,'4q6nx',31.5,384),(1,'pgs1',66.8,77),(19,'lltflc',33.49,63),(87,'vd4htc',39.92,-5367008),(47,NULL,28.3,10),(29,'15jqfc',100.11,64),(45,'ii6pm',52.41,61),(0,NULL,85.27,19),(104,'ikpxnb',40.66,955),(40,'gzryzd',36.23,42),(18,'7UPNE',84.27,14),(32,NULL,84.8,53),(51,'2c5lfb',18.98,74),(97,NULL,22.89,6),(70,'guyzyc',96.29,89),(34,'dvdoqb',53.82,1),(94,'6eop6b',81.77,90),(42,'p7vsnd',62.54,NULL);`) - - tk.MustExec("CREATE TABLE `t_vejdy` (`c_iovir` int(11) NOT NULL,`c_r_mw3d` double DEFAULT NULL,`c_uxhghb` int(11) DEFAULT NULL,`c_rb7otb` int(11) NOT NULL,`c_dplyac` int(11) DEFAULT NULL,`c_lmcqed` double DEFAULT NULL,`c_ayaoed` text DEFAULT NULL,`c__zbqr` int(11) DEFAULT NULL,PRIMARY KEY (`c_iovir`,`c_rb7otb`) /*T![clustered_index] NONCLUSTERED */,KEY `t_e1ejcd` (`c_uxhghb`),KEY `t_o6ui_b` (`c_iovir`,`c_r_mw3d`,`c_uxhghb`,`c_rb7otb`,`c_dplyac`,`c_lmcqed`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;") - - tk.MustExec(`INSERT INTO t_vejdy VALUES 
(49,100.11,68,57,44,17.93,NULL,84),(38,56.91,78,30,0,53.28,'cjd1o',2),(6,NULL,NULL,88,81,93.47,'0jftkb',54),(73,91.51,31,82,3,38.12,'buesob',40),(7,26.73,7,78,9,NULL,'fd5kgd',49),(80,70.57,4,47,43,25.59,'glpoq',44),(79,94.16,15,0,0,79.55,'0ok94d',56),(58,NULL,50,69,2,65.46,'sm6rj',29),(41472,6.51,70,1080,100,43.18,'fofk4c',43),(0,6.2,57,97,2,56.17,'zqpzq',56),(72,76.66,97,88,95,75.47,'hikxqb',34),(27,1.11,134217728,57,25,NULL,'4gflb',0),(64,NULL,47,69,6,72.5,'w7jmhd',45),(-134217679,88.74,33,82,85,59.89,NULL,26),(59,97.98,37,28,33,61.1,'xioxdd',45),(6,47.31,0,0,-19,38.77,'uxmdlc',17),(82,28.62,36,70,39,11.79,'zzi8cc',2),(33,37.3,55,86,69,60.56,'mn_xx',0),(7,NULL,80,0,17,59.79,'5n1q7',97),(88,50.81,15,30,63,25.37,'ordwed',29),(48,4.32,90,48,38,84.62,'lclx',32),(10,NULL,95,75,1,21.64,NULL,85),(62,NULL,0,30,10,NULL,'7bacud',5),(50,38.81,6,0,6,64.28,'gpibn',57),(1,46.8,21,32,46,33.38,NULL,6),(29,NULL,38,7,91,31.5,'pdzdl',24),(54,6.26,1,85,22,75.63,'gl4_7',29),(1,90.37,63,63,6,61.2,'wvw23b',86),(47,NULL,82,73,0,95.79,'uipcf',NULL),(46,48.1,37,6,1,52.33,'gthpic',0),(41,75.1,7,44,5,84.16,'fe_e5',58),(43,87.71,81,32,28,91.98,'9e5nvc',66),(20,58.21,88,75,92,43.64,'kagroc',66),(91,52.75,22,14,80,NULL,'\'_YN6MD\'',6),(72,94.83,0,49,5,57.82,NULL,23),(7,100.11,0,92,13,6.28,NULL,0);`) - - tk.MustExec("begin") - tk.MustExec("delete from t_tir89b where t_tir89b.c_3pcik >= t_tir89b.c_sroc_c;") - result := tk.MustQuery("select * from (select count(*) over (partition by ref_0.c_0b6nxb order by ref_0.c_3pcik) as c0 from t_tir89b as ref_0) as subq_0 where subq_0.c0 <> 1;") - result.Check(testkit.Rows("2", "3")) - tk.MustExec("commit") -} - func testReturnColumnNullableAttribute(tk *testkit.TestKit, funcName string, isNullable bool) { rs, err := tk.ExecWithContext(context.Background(), fmt.Sprintf("select %s over (partition by p order by o rows between 1 preceding and 1 following) as a from agg;", funcName)) tk.RequireNoError(err, "testReturnColumnNullableAttribute get error") diff --git a/pkg/executor/write.go b/pkg/executor/write.go index 39187586979d5..28733152208ea 100644 --- a/pkg/executor/write.go +++ b/pkg/executor/write.go @@ -90,7 +90,7 @@ func updateRecord( // Compare datum, then handle some flags. for i, col := range t.Cols() { // We should use binary collation to compare datum, otherwise the result will be incorrect. 
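Note: the one-line change below, like the union_scan.go hunk above, is an instance of the mechanical migration that runs through the rest of this patch — Datum.Compare now takes the narrower types.Context, obtained from the statement context via sc.TypeCtx(), rather than the full *stmtctx.StatementContext. A minimal before/after sketch of the calling convention, reusing the names from these hunks (an illustration only, not part of the patch):

    // Before this patch: the comparison received the whole statement context.
    cmp, err := newData[i].Compare(sc, &oldData[i], collate.GetBinaryCollator())

    // After: only the type-conversion context is passed; nothing else in the
    // StatementContext is needed for a raw, binary-collated datum comparison.
    cmp, err = newData[i].Compare(sc.TypeCtx(), &oldData[i], collate.GetBinaryCollator())
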
- cmp, err := newData[i].Compare(sc, &oldData[i], collate.GetBinaryCollator()) + cmp, err := newData[i].Compare(sc.TypeCtx(), &oldData[i], collate.GetBinaryCollator()) if err != nil { return false, err } diff --git a/pkg/expression/BUILD.bazel b/pkg/expression/BUILD.bazel index aad54a9e037de..90b1d64409fbd 100644 --- a/pkg/expression/BUILD.bazel +++ b/pkg/expression/BUILD.bazel @@ -117,7 +117,6 @@ go_library( "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_tipb//go-tipb", - "@com_github_pkg_errors//:errors", "@com_github_tikv_client_go_v2//oracle", "@org_golang_x_tools//container/intsets", "@org_uber_go_atomic//:atomic", @@ -223,6 +222,7 @@ go_test( "//pkg/util/mathutil", "//pkg/util/mock", "//pkg/util/printer", + "//pkg/util/sqlkiller", "//pkg/util/timeutil", "@com_github_gogo_protobuf//proto", "@com_github_google_uuid//:uuid", diff --git a/pkg/expression/aggregation/bit_and.go b/pkg/expression/aggregation/bit_and.go index 2d975f6563fcc..8d7adb6593850 100644 --- a/pkg/expression/aggregation/bit_and.go +++ b/pkg/expression/aggregation/bit_and.go @@ -47,7 +47,7 @@ func (bf *bitAndFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.Statem if value.Kind() == types.KindUint64 { evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() & value.GetUint64()) } else { - int64Value, err := value.ToInt64(sc) + int64Value, err := value.ToInt64(sc.TypeCtx()) if err != nil { return err } diff --git a/pkg/expression/aggregation/bit_or.go b/pkg/expression/aggregation/bit_or.go index 409085855919c..35c72a0e70a1d 100644 --- a/pkg/expression/aggregation/bit_or.go +++ b/pkg/expression/aggregation/bit_or.go @@ -45,7 +45,7 @@ func (bf *bitOrFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.Stateme if value.Kind() == types.KindUint64 { evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() | value.GetUint64()) } else { - int64Value, err := value.ToInt64(sc) + int64Value, err := value.ToInt64(sc.TypeCtx()) if err != nil { return err } diff --git a/pkg/expression/aggregation/bit_xor.go b/pkg/expression/aggregation/bit_xor.go index c3c97d5bd1712..47582cf844f15 100644 --- a/pkg/expression/aggregation/bit_xor.go +++ b/pkg/expression/aggregation/bit_xor.go @@ -45,7 +45,7 @@ func (bf *bitXorFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.Statem if value.Kind() == types.KindUint64 { evalCtx.Value.SetUint64(evalCtx.Value.GetUint64() ^ value.GetUint64()) } else { - int64Value, err := value.ToInt64(sc) + int64Value, err := value.ToInt64(sc.TypeCtx()) if err != nil { return err } diff --git a/pkg/expression/aggregation/max_min.go b/pkg/expression/aggregation/max_min.go index 2dd7696cdf9e7..cffdae9a2e22e 100644 --- a/pkg/expression/aggregation/max_min.go +++ b/pkg/expression/aggregation/max_min.go @@ -51,7 +51,7 @@ func (mmf *maxMinFunction) Update(evalCtx *AggEvaluateContext, sc *stmtctx.State return nil } var c int - c, err = evalCtx.Value.Compare(sc, &value, mmf.ctor) + c, err = evalCtx.Value.Compare(sc.TypeCtx(), &value, mmf.ctor) if err != nil { return err } diff --git a/pkg/expression/builtin_cast.go b/pkg/expression/builtin_cast.go index d01710fa044e1..49230bd12b5d5 100644 --- a/pkg/expression/builtin_cast.go +++ b/pkg/expression/builtin_cast.go @@ -480,9 +480,7 @@ var fakeSctx = newFakeSctx() func newFakeSctx() *stmtctx.StatementContext { sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(types.StrictFlags. 
- WithClipNegativeToZero(true), - ) + sc.SetTypeFlags(types.StrictFlags) return sc } @@ -543,7 +541,8 @@ func convertJSON2Tp(evalType types.EvalType) func(*stmtctx.StatementContext, typ if item.TypeCode != types.JSONTypeCodeInt64 && item.TypeCode != types.JSONTypeCodeUint64 { return nil, ErrInvalidJSONForFuncIndex } - jsonToInt, err := types.ConvertJSONToInt(sc, item, mysql.HasUnsignedFlag(tp.GetFlag()), tp.GetType()) + jsonToInt, err := types.ConvertJSONToInt(sc.TypeCtx(), item, mysql.HasUnsignedFlag(tp.GetFlag()), tp.GetType()) + err = sc.HandleOverflow(err, err) if mysql.HasUnsignedFlag(tp.GetFlag()) { return uint64(jsonToInt), err } @@ -704,7 +703,9 @@ func (b *builtinCastIntAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyDe } else { res = types.NewDecFromUint(uint64(val)) } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, b.ctx.GetSessionVars().StmtCtx) + sc := b.ctx.GetSessionVars().StmtCtx + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, isNull, err } @@ -756,9 +757,9 @@ func (b *builtinCastIntAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNul } if b.args[0].GetType().GetType() == mysql.TypeYear { - res, err = types.ParseTimeFromYear(b.ctx.GetSessionVars().StmtCtx, val) + res, err = types.ParseTimeFromYear(val) } else { - res, err = types.ParseTimeFromNum(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetType(), b.tp.GetDecimal()) + res, err = types.ParseTimeFromNum(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), val, b.tp.GetType(), b.tp.GetDecimal()) } if err != nil { @@ -1020,7 +1021,9 @@ func (b *builtinCastRealAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD return res, false, err } } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, b.ctx.GetSessionVars().StmtCtx) + sc := b.ctx.GetSessionVars().StmtCtx + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1075,7 +1078,7 @@ func (b *builtinCastRealAsTimeSig) evalTime(row chunk.Row) (types.Time, bool, er return types.ZeroTime, false, nil } sc := b.ctx.GetSessionVars().StmtCtx - res, err := types.ParseTimeFromFloatString(sc, fv, b.tp.GetType(), b.tp.GetDecimal()) + res, err := types.ParseTimeFromFloatString(sc.TypeCtx(), fv, b.tp.GetType(), b.tp.GetDecimal()) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1101,7 +1104,7 @@ func (b *builtinCastRealAsDurationSig) evalDuration(row chunk.Row) (res types.Du if isNull || err != nil { return res, isNull, err } - res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal()) + res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), strconv.FormatFloat(val, 'f', -1, 64), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1132,7 +1135,8 @@ func (b *builtinCastDecimalAsDecimalSig) evalDecimal(row chunk.Row) (res *types. 
*res = *evalDecimal } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1253,7 +1257,7 @@ func (b *builtinCastDecimalAsTimeSig) evalTime(row chunk.Row) (res types.Time, i return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTimeFromFloatString(sc, string(val.ToString()), b.tp.GetType(), b.tp.GetDecimal()) + res, err = types.ParseTimeFromFloatString(sc.TypeCtx(), string(val.ToString()), b.tp.GetType(), b.tp.GetDecimal()) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1279,7 +1283,7 @@ func (b *builtinCastDecimalAsDurationSig) evalDuration(row chunk.Row) (res types if isNull || err != nil { return res, true, err } - res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(val.ToString()), b.tp.GetDecimal()) + res, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), string(val.ToString()), b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) // ErrTruncatedWrongVal needs to be considered NULL. @@ -1420,7 +1424,7 @@ func (b *builtinCastStringAsRealSig) evalReal(row chunk.Row) (res float64, isNul if b.inUnion && mysql.HasUnsignedFlag(b.tp.GetFlag()) && res < 0 { res = 0 } - res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp) return res, false, err } @@ -1456,7 +1460,8 @@ func (b *builtinCastStringAsDecimalSig) evalDecimal(row chunk.Row) (res *types.M return res, false, err } } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1476,7 +1481,7 @@ func (b *builtinCastStringAsTimeSig) evalTime(row chunk.Row) (res types.Time, is return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTime(sc, val, b.tp.GetType(), b.tp.GetDecimal(), nil) + res, err = types.ParseTime(sc.TypeCtx(), val, b.tp.GetType(), b.tp.GetDecimal(), nil) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -1505,7 +1510,7 @@ func (b *builtinCastStringAsDurationSig) evalDuration(row chunk.Row) (res types. if isNull || err != nil { return res, isNull, err } - res, isNull, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, val, b.tp.GetDecimal()) + res, isNull, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), val, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { sc := b.ctx.GetSessionVars().StmtCtx err = sc.HandleTruncate(err) @@ -1530,10 +1535,10 @@ func (b *builtinCastTimeAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu } sc := b.ctx.GetSessionVars().StmtCtx - if res, err = res.Convert(sc, b.tp.GetType()); err != nil { + if res, err = res.Convert(sc.TypeCtx(), b.tp.GetType()); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) if b.tp.GetType() == mysql.TypeDate { // Truncate hh:mm:ss part if the type is Date. 
res.SetCoreTime(types.FromDate(res.Year(), res.Month(), res.Day(), 0, 0, 0, 0)) @@ -1558,7 +1563,7 @@ func (b *builtinCastTimeAsIntSig) evalInt(row chunk.Row) (res int64, isNull bool return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - t, err := val.RoundFrac(sc, types.DefaultFsp) + t, err := val.RoundFrac(sc.TypeCtx(), types.DefaultFsp) if err != nil { return res, false, err } @@ -1601,7 +1606,8 @@ func (b *builtinCastTimeAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(val.ToNumber(), b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), val.ToNumber(), b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1734,7 +1740,8 @@ func (b *builtinCastDurationAsDecimalSig) evalDecimal(row chunk.Row) (res *types return res, false, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ProduceDecWithSpecifiedTp(val.ToNumber(), b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), val.ToNumber(), b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1798,11 +1805,11 @@ func (b *builtinCastDurationAsTimeSig) evalTime(row chunk.Row) (res types.Time, if err != nil { ts = gotime.Now() } - res, err = val.ConvertToTimeWithTimestamp(sc, b.tp.GetType(), ts) + res, err = val.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) return res, false, err } @@ -1836,7 +1843,8 @@ func (b *builtinCastJSONAsIntSig) evalInt(row chunk.Row) (res int64, isNull bool return res, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ConvertJSONToInt64(sc, val, mysql.HasUnsignedFlag(b.tp.GetFlag())) + res, err = types.ConvertJSONToInt64(sc.TypeCtx(), val, mysql.HasUnsignedFlag(b.tp.GetFlag())) + err = sc.HandleOverflow(err, err) return } @@ -1880,7 +1888,8 @@ func (b *builtinCastJSONAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyD if err != nil { return res, false, err } - res, err = types.ProduceDecWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), res, b.tp) + err = sc.HandleOverflow(err, err) return res, false, err } @@ -1939,11 +1948,11 @@ func (b *builtinCastJSONAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu if err != nil { ts = gotime.Now() } - res, err = duration.ConvertToTimeWithTimestamp(sc, b.tp.GetType(), ts) + res, err = duration.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - res, err = res.RoundFrac(sc, b.tp.GetDecimal()) + res, err = res.RoundFrac(sc.TypeCtx(), b.tp.GetDecimal()) return res, isNull, err case types.JSONTypeCodeString: s, err := val.Unquote() @@ -1951,7 +1960,7 @@ func (b *builtinCastJSONAsTimeSig) evalTime(row chunk.Row) (res types.Time, isNu return res, false, err } sc := b.ctx.GetSessionVars().StmtCtx - res, err = types.ParseTime(sc, s, b.tp.GetType(), b.tp.GetDecimal(), nil) + res, err = types.ParseTime(sc.TypeCtx(), s, b.tp.GetType(), b.tp.GetDecimal(), nil) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } @@ -2001,7 +2010,7 @@ func (b *builtinCastJSONAsDurationSig) evalDuration(row chunk.Row) (res types.Du if err != nil { return res, false, err } - res, _, err = types.ParseDuration(stmtCtx, s, 
b.tp.GetDecimal()) + res, _, err = types.ParseDuration(stmtCtx.TypeCtx(), s, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { sc := b.ctx.GetSessionVars().StmtCtx err = sc.HandleTruncate(err) diff --git a/pkg/expression/builtin_cast_vec.go b/pkg/expression/builtin_cast_vec.go index d06e9d09985cd..d4dcd97b07e23 100644 --- a/pkg/expression/builtin_cast_vec.go +++ b/pkg/expression/builtin_cast_vec.go @@ -307,8 +307,8 @@ func (b *builtinCastTimeAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result } *dec = types.MyDecimal{} times[i].FillNumber(dec) - dec, err = types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -385,9 +385,9 @@ func (b *builtinCastIntAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk. } if b.args[0].GetType().GetType() == mysql.TypeYear { - tm, err = types.ParseTimeFromYear(stmt, i64s[i]) + tm, err = types.ParseTimeFromYear(i64s[i]) } else { - tm, err = types.ParseTimeFromNum(stmt, i64s[i], b.tp.GetType(), fsp) + tm, err = types.ParseTimeFromNum(stmt.TypeCtx(), i64s[i], b.tp.GetType(), fsp) } if err != nil { @@ -512,7 +512,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk duration := val.GetDuration() sc := b.ctx.GetSessionVars().StmtCtx - tm, err := duration.ConvertToTimeWithTimestamp(sc, b.tp.GetType(), ts) + tm, err := duration.ConvertToTimeWithTimestamp(sc.TypeCtx(), b.tp.GetType(), ts) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -520,7 +520,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tm, err = tm.RoundFrac(stmtCtx, fsp) + tm, err = tm.RoundFrac(stmtCtx.TypeCtx(), fsp) if err != nil { return err } @@ -530,7 +530,7 @@ func (b *builtinCastJSONAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk if err != nil { return err } - tm, err := types.ParseTime(stmtCtx, s, b.tp.GetType(), fsp, nil) + tm, err := types.ParseTime(stmtCtx.TypeCtx(), s, b.tp.GetType(), fsp, nil) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -584,7 +584,7 @@ func (b *builtinCastRealAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk times[i] = types.ZeroTime continue } - tm, err := types.ParseTimeFromFloatString(stmt, fv, b.tp.GetType(), fsp) + tm, err := types.ParseTimeFromFloatString(stmt.TypeCtx(), fv, b.tp.GetType(), fsp) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -623,8 +623,8 @@ func (b *builtinCastDecimalAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, resu if !(conditionUnionAndUnsigned && decs[i].IsNegative()) { *dec = decs[i] } - dec, err := types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -665,7 +665,7 @@ func (b *builtinCastDurationAsTimeSig) vecEvalTime(input *chunk.Chunk, result *c duration.Duration = ds[i] duration.Fsp = fsp - tm, err := duration.ConvertToTimeWithTimestamp(stmtCtx, b.tp.GetType(), ts) + tm, err := duration.ConvertToTimeWithTimestamp(stmtCtx.TypeCtx(), b.tp.GetType(), ts) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -673,7 +673,7 @@ func (b *builtinCastDurationAsTimeSig) vecEvalTime(input *chunk.Chunk, result *c result.SetNull(i, 
true) continue } - tm, err = tm.RoundFrac(stmtCtx, fsp) + tm, err = tm.RoundFrac(stmtCtx.TypeCtx(), fsp) if err != nil { return err } @@ -899,6 +899,7 @@ func (b *builtinCastRealAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result result.MergeNulls(buf) bufreal := buf.Float64s() resdecimal := result.Decimals() + sc := b.ctx.GetSessionVars().StmtCtx for i := 0; i < n; i++ { if result.IsNull(i) { continue @@ -917,8 +918,8 @@ func (b *builtinCastRealAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result } } } - dec, err := types.ProduceDecWithSpecifiedTp(&resdecimal[i], b.tp, b.ctx.GetSessionVars().StmtCtx) - if err != nil { + dec, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), &resdecimal[i], b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } resdecimal[i] = *dec @@ -1010,7 +1011,7 @@ func (b *builtinCastStringAsDurationSig) vecEvalDuration(input *chunk.Chunk, res if result.IsNull(i) { continue } - dur, isNull, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, buf.GetString(i), b.tp.GetDecimal()) + dur, isNull, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), buf.GetString(i), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1058,8 +1059,8 @@ func (b *builtinCastDurationAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, res } duration.Duration = ds[i] duration.Fsp = fsp - res, err := types.ProduceDecWithSpecifiedTp(duration.ToNumber(), b.tp, sc) - if err != nil { + res, err := types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), duration.ToNumber(), b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } d64s[i] = *res @@ -1104,8 +1105,8 @@ func (b *builtinCastIntAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result * dec.FromUint(uint64(nums[i])) } - dec, err = types.ProduceDecWithSpecifiedTp(dec, b.tp, sc) - if err != nil { + dec, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), dec, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } decs[i] = *dec @@ -1254,12 +1255,13 @@ func (b *builtinCastJSONAsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C result.MergeNulls(buf) i64s := result.Int64s() sc := b.ctx.GetSessionVars().StmtCtx + tc := sc.TypeCtx() for i := 0; i < n; i++ { if result.IsNull(i) { continue } - i64s[i], err = types.ConvertJSONToInt64(sc, buf.GetJSON(i), mysql.HasUnsignedFlag(b.tp.GetFlag())) - if err != nil { + i64s[i], err = types.ConvertJSONToInt64(tc, buf.GetJSON(i), mysql.HasUnsignedFlag(b.tp.GetFlag())) + if err = sc.HandleOverflow(err, err); err != nil { return err } } @@ -1288,7 +1290,7 @@ func (b *builtinCastRealAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul if result.IsNull(i) { continue } - dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal()) + dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), strconv.FormatFloat(f64s[i], 'f', -1, 64), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1483,7 +1485,7 @@ func (b *builtinCastDecimalAsTimeSig) vecEvalTime(input *chunk.Chunk, result *ch if buf.IsNull(i) { continue } - tm, err := types.ParseTimeFromFloatString(stmt, string(decimals[i].ToString()), b.tp.GetType(), fsp) + tm, err := types.ParseTimeFromFloatString(stmt.TypeCtx(), string(decimals[i].ToString()), b.tp.GetType(), fsp) if err != nil { if err = handleInvalidTimeError(b.ctx, err); 
err != nil { return err @@ -1524,7 +1526,7 @@ func (b *builtinCastTimeAsIntSig) vecEvalInt(input *chunk.Chunk, result *chunk.C if result.IsNull(i) { continue } - t, err := times[i].RoundFrac(sc, types.DefaultFsp) + t, err := times[i].RoundFrac(sc.TypeCtx(), types.DefaultFsp) if err != nil { return err } @@ -1553,7 +1555,7 @@ func (b *builtinCastTimeAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk if result.IsNull(i) { continue } - res, err := times[i].Convert(stmt, b.tp.GetType()) + res, err := times[i].Convert(stmt.TypeCtx(), b.tp.GetType()) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -1561,7 +1563,7 @@ func (b *builtinCastTimeAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tm, err := res.RoundFrac(stmt, fsp) + tm, err := res.RoundFrac(stmt.TypeCtx(), fsp) if err != nil { return err } @@ -1643,8 +1645,8 @@ func (b *builtinCastJSONAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, result if err != nil { return err } - tempres, err = types.ProduceDecWithSpecifiedTp(tempres, b.tp, sc) - if err != nil { + tempres, err = types.ProduceDecWithSpecifiedTp(sc.TypeCtx(), tempres, b.tp) + if err = sc.HandleOverflow(err, err); err != nil { return err } res[i] = *tempres @@ -1692,7 +1694,7 @@ func (b *builtinCastStringAsRealSig) vecEvalReal(input *chunk.Chunk, result *chu if b.inUnion && mysql.HasUnsignedFlag(b.tp.GetFlag()) && res < 0 { res = 0 } - res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp, sc) + res, err = types.ProduceFloatWithSpecifiedTp(res, b.tp) if err != nil { return err } @@ -1733,8 +1735,8 @@ func (b *builtinCastStringAsDecimalSig) vecEvalDecimal(input *chunk.Chunk, resul if err := stmtCtx.HandleTruncate(dec.FromString([]byte(val))); err != nil { return err } - dec, err := types.ProduceDecWithSpecifiedTp(dec, b.tp, stmtCtx) - if err != nil { + dec, err := types.ProduceDecWithSpecifiedTp(stmtCtx.TypeCtx(), dec, b.tp) + if err = stmtCtx.HandleOverflow(err, err); err != nil { return err } res[i] = *dec @@ -1767,7 +1769,7 @@ func (b *builtinCastStringAsTimeSig) vecEvalTime(input *chunk.Chunk, result *chu if result.IsNull(i) { continue } - tm, err := types.ParseTime(stmtCtx, buf.GetString(i), b.tp.GetType(), fsp, nil) + tm, err := types.ParseTime(stmtCtx.TypeCtx(), buf.GetString(i), b.tp.GetType(), fsp, nil) if err != nil { if errors.Is(err, strconv.ErrSyntax) || errors.Is(err, strconv.ErrRange) { err = types.ErrIncorrectDatetimeValue.GenWithStackByArgs(buf.GetString(i)) @@ -1871,7 +1873,7 @@ func (b *builtinCastDecimalAsDurationSig) vecEvalDuration(input *chunk.Chunk, re if result.IsNull(i) { continue } - dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, string(args[i].ToString()), b.tp.GetDecimal()) + dur, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), string(args[i].ToString()), b.tp.GetDecimal()) if err != nil { if types.ErrTruncatedWrongVal.Equal(err) { err = b.ctx.GetSessionVars().StmtCtx.HandleTruncate(err) @@ -1977,7 +1979,7 @@ func (b *builtinCastJSONAsDurationSig) vecEvalDuration(input *chunk.Chunk, resul if err != nil { return err } - dur, _, err = types.ParseDuration(stmtCtx, s, b.tp.GetDecimal()) + dur, _, err = types.ParseDuration(stmtCtx.TypeCtx(), s, b.tp.GetDecimal()) if types.ErrTruncatedWrongVal.Equal(err) { err = stmtCtx.HandleTruncate(err) } diff --git a/pkg/expression/builtin_compare.go b/pkg/expression/builtin_compare.go index e1eb1532b8462..133a54182f9b1 100644 --- a/pkg/expression/builtin_compare.go +++ 
b/pkg/expression/builtin_compare.go @@ -19,6 +19,7 @@ import ( "math" "strings" + "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/opcode" @@ -29,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tipb/go-tipb" - "github.com/pkg/errors" ) var ( @@ -729,14 +729,14 @@ func doTimeConversionForGL(cmpAsDate bool, ctx sessionctx.Context, sc *stmtctx.S var t types.Time var err error if cmpAsDate { - t, err = types.ParseDate(sc, strVal) + t, err = types.ParseDate(sc.TypeCtx(), strVal) if err == nil { - t, err = t.Convert(sc, mysql.TypeDate) + t, err = t.Convert(sc.TypeCtx(), mysql.TypeDate) } } else { - t, err = types.ParseDatetime(sc, strVal) + t, err = types.ParseDatetime(sc.TypeCtx(), strVal) if err == nil { - t, err = t.Convert(sc, mysql.TypeDatetime) + t, err = t.Convert(sc.TypeCtx(), mysql.TypeDatetime) } } if err != nil { @@ -774,7 +774,7 @@ func (b *builtinGreatestTimeSig) evalTime(row chunk.Row) (res types.Time, isNull // Convert ETType Time value to MySQL actual type, distinguish date and datetime sc := b.ctx.GetSessionVars().StmtCtx resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) - if res, err = res.Convert(sc, resTimeTp); err != nil { + if res, err = res.Convert(sc.TypeCtx(), resTimeTp); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } return res, false, nil @@ -1048,7 +1048,7 @@ func (b *builtinLeastTimeSig) evalTime(row chunk.Row) (res types.Time, isNull bo // Convert ETType Time value to MySQL actual type, distinguish date and datetime sc := b.ctx.GetSessionVars().StmtCtx resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) - if res, err = res.Convert(sc, resTimeTp); err != nil { + if res, err = res.Convert(sc.TypeCtx(), resTimeTp); err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } return res, false, nil @@ -1443,7 +1443,7 @@ func tryToConvertConstantInt(ctx sessionctx.Context, targetFieldType *types.Fiel } sc := ctx.GetSessionVars().StmtCtx - dt, err = dt.ConvertTo(sc, targetFieldType) + dt, err = dt.ConvertTo(sc.TypeCtx(), targetFieldType) if err != nil { if terror.ErrorEqual(err, types.ErrOverflow) { return &Constant{ @@ -1482,7 +1482,7 @@ func RefineComparedConstant(ctx sessionctx.Context, targetFieldType types.FieldT targetFieldType = *types.NewFieldType(mysql.TypeLonglong) } var intDatum types.Datum - intDatum, err = dt.ConvertTo(sc, &targetFieldType) + intDatum, err = dt.ConvertTo(sc.TypeCtx(), &targetFieldType) if err != nil { if terror.ErrorEqual(err, types.ErrOverflow) { return &Constant{ @@ -1494,7 +1494,7 @@ func RefineComparedConstant(ctx sessionctx.Context, targetFieldType types.FieldT } return con, false } - c, err := intDatum.Compare(sc, &con.Value, collate.GetBinaryCollator()) + c, err := intDatum.Compare(sc.TypeCtx(), &con.Value, collate.GetBinaryCollator()) if err != nil { return con, false } @@ -1539,7 +1539,7 @@ func RefineComparedConstant(ctx sessionctx.Context, targetFieldType types.FieldT // 3. Suppose the value of `con` is 2, when `targetFieldType.GetType()` is `TypeYear`, the value of `doubleDatum` // will be 2.0 and the value of `intDatum` will be 2002 in this case. 
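// All the builtin_compare.go hunks here make one mechanical change: Datum.ConvertTo
// and Datum.Compare now take the narrower types.Context (obtained via
// StmtCtx.TypeCtx()) instead of the full *stmtctx.StatementContext. A minimal
// sketch of the new call shape, reusing only identifiers that appear in this
// diff (types.DefaultStmtNoWarningContext is the test-oriented context; the
// literal value is illustrative):
//
//	tc := types.DefaultStmtNoWarningContext
//	strDatum := types.NewStringDatum("2002")
//	// Refine the string constant to an integer, as tryToConvertConstantInt does.
//	intDatum, err := strDatum.ConvertTo(tc, types.NewFieldType(mysql.TypeLonglong))
//	if err == nil {
//		// 0: both sides coerce to the same value under the binary collator.
//		cmp, _ := intDatum.Compare(tc, &strDatum, collate.GetBinaryCollator())
//		_ = cmp
//	}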
var doubleDatum types.Datum - doubleDatum, err = dt.ConvertTo(sc, types.NewFieldType(mysql.TypeDouble)) + doubleDatum, err = dt.ConvertTo(sc.TypeCtx(), types.NewFieldType(mysql.TypeDouble)) if err != nil { return con, false } @@ -1737,7 +1737,7 @@ func (c *compareFunctionClass) refineNumericConstantCmpDatetime(ctx sessionctx.C sc := ctx.GetSessionVars().StmtCtx var datetimeDatum types.Datum targetFieldType := types.NewFieldType(mysql.TypeDatetime) - datetimeDatum, err = dt.ConvertTo(sc, targetFieldType) + datetimeDatum, err = dt.ConvertTo(sc.TypeCtx(), targetFieldType) if err != nil || datetimeDatum.IsNull() { return args } diff --git a/pkg/expression/builtin_compare_vec.go b/pkg/expression/builtin_compare_vec.go index 2d09673bfe251..06f830129738b 100644 --- a/pkg/expression/builtin_compare_vec.go +++ b/pkg/expression/builtin_compare_vec.go @@ -841,7 +841,7 @@ func (b *builtinGreatestTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk.C resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) for rowIdx := 0; rowIdx < n; rowIdx++ { resTimes := result.Times() - resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc, resTimeTp) + resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc.TypeCtx(), resTimeTp) if err != nil { return err } @@ -882,7 +882,7 @@ func (b *builtinLeastTimeSig) vecEvalTime(input *chunk.Chunk, result *chunk.Colu resTimeTp := getAccurateTimeTypeForGLRet(b.cmpAsDate) for rowIdx := 0; rowIdx < n; rowIdx++ { resTimes := result.Times() - resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc, resTimeTp) + resTimes[rowIdx], err = resTimes[rowIdx].Convert(sc.TypeCtx(), resTimeTp) if err != nil { return err } diff --git a/pkg/expression/builtin_miscellaneous_vec.go b/pkg/expression/builtin_miscellaneous_vec.go index 9042df587f834..d86f3853cf3fa 100644 --- a/pkg/expression/builtin_miscellaneous_vec.go +++ b/pkg/expression/builtin_miscellaneous_vec.go @@ -21,7 +21,6 @@ import ( "math" "net" "strings" - "sync/atomic" "time" "github.com/google/uuid" @@ -360,18 +359,13 @@ func doSleep(secs float64, sessVars *variable.SessionVars) (isKilled bool) { select { case <-ticker.C: // MySQL 8.0 sleep: https://dev.mysql.com/doc/refman/8.0/en/miscellaneous-functions.html#function_sleep - if len(sessVars.StmtCtx.TableIDs) == 0 { - // Regular kill or Killed because of max execution time - if atomic.CompareAndSwapUint32(&sessVars.Killed, 1, 0) || atomic.CompareAndSwapUint32(&sessVars.Killed, 2, 0) { - timer.Stop() - return true - } - } else { - // Regular kill or Killed because of max execution time. 
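// The doSleep rewrite in the next hunk replaces direct atomic polling of
// sessVars.Killed with the sqlkiller API: HandleSignal reports a pending kill
// as an error, SendKillSignal (used by the updated tests) raises one, and Reset
// clears it. The SLEEP() quirk is preserved: when the statement touches no
// tables, a kill only cuts the sleep short, so the signal is consumed and the
// session keeps running. A sketch assembled only from calls visible in this
// diff (killAware is a hypothetical helper name):
//
//	func killAware(sessVars *variable.SessionVars) bool {
//		if err := sessVars.SQLKiller.HandleSignal(); err != nil {
//			if len(sessVars.StmtCtx.TableIDs) == 0 {
//				sessVars.SQLKiller.Reset()
//			}
//			return true // stop sleeping and report the kill
//		}
//		return false
//	}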
-			if atomic.LoadUint32(&sessVars.Killed) == 1 || atomic.LoadUint32(&sessVars.Killed) == 2 {
-				timer.Stop()
-				return true
+			// Regular kill or Killed because of max execution time
+			if err := sessVars.SQLKiller.HandleSignal(); err != nil {
+				if len(sessVars.StmtCtx.TableIDs) == 0 {
+					sessVars.SQLKiller.Reset()
 				}
+				timer.Stop()
+				return true
 			}
 		case <-timer.C:
 			return false
diff --git a/pkg/expression/builtin_miscellaneous_vec_test.go b/pkg/expression/builtin_miscellaneous_vec_test.go
index 91da4c219c536..c299bad20b4a0 100644
--- a/pkg/expression/builtin_miscellaneous_vec_test.go
+++ b/pkg/expression/builtin_miscellaneous_vec_test.go
@@ -15,7 +15,6 @@ package expression
 import (
-	"sync/atomic"
 	"testing"
 	"time"
@@ -23,6 +22,7 @@ import (
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/mock"
+	"github.com/pingcap/tidb/pkg/util/sqlkiller"
 	"github.com/stretchr/testify/require"
 )
@@ -215,7 +215,7 @@ func TestSleepVectorized(t *testing.T) {
 	start = time.Now()
 	go func() {
 		time.Sleep(1 * time.Second)
-		atomic.CompareAndSwapUint32(&ctx.GetSessionVars().Killed, 0, 1)
+		ctx.GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted)
 	}()
 	err = f.vecEvalInt(input, result)
 	sub = time.Since(start)
diff --git a/pkg/expression/builtin_other_test.go b/pkg/expression/builtin_other_test.go
index c0d364edc9cfc..968cf9ee5aa91 100644
--- a/pkg/expression/builtin_other_test.go
+++ b/pkg/expression/builtin_other_test.go
@@ -21,7 +21,6 @@ import (
 	"github.com/pingcap/tidb/pkg/parser/ast"
 	"github.com/pingcap/tidb/pkg/parser/mysql"
-	"github.com/pingcap/tidb/pkg/sessionctx/stmtctx"
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/collate"
@@ -67,9 +66,8 @@ func TestBitCount(t *testing.T) {
 			require.Nil(t, test.count)
 			continue
 		}
-		sc := stmtctx.NewStmtCtxWithTimeZone(stmtCtx.TimeZone())
-		sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true))
-		res, err := count.ToInt64(sc)
+		ctx := types.DefaultStmtNoWarningContext.WithFlags(types.DefaultStmtFlags.WithIgnoreTruncateErr(true))
+		res, err := count.ToInt64(ctx)
 		require.NoError(t, err)
 		require.Equal(t, test.count, res)
 	}
@@ -195,7 +193,7 @@ func TestValues(t *testing.T) {
 	ret, err = evalBuiltinFunc(sig, chunk.Row{})
 	require.NoError(t, err)
-	cmp, err := ret.Compare(nil, &currInsertValues[1], collate.GetBinaryCollator())
+	cmp, err := ret.Compare(types.DefaultStmtNoWarningContext, &currInsertValues[1], collate.GetBinaryCollator())
 	require.NoError(t, err)
 	require.Equal(t, 0, cmp)
 }
diff --git a/pkg/expression/builtin_other_vec_test.go b/pkg/expression/builtin_other_vec_test.go
index 1a88cfaaef290..78c2220b0736b 100644
--- a/pkg/expression/builtin_other_vec_test.go
+++ b/pkg/expression/builtin_other_vec_test.go
@@ -27,7 +27,7 @@ import (
 )
 func dateTimeFromString(s string) types.Time {
-	t, err := types.ParseDate(nil, s)
+	t, err := types.ParseDate(types.DefaultStmtNoWarningContext, s)
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/expression/builtin_time.go b/pkg/expression/builtin_time.go
index 5af1a08e1be25..4f650eac69d27 100644
--- a/pkg/expression/builtin_time.go
+++ b/pkg/expression/builtin_time.go
@@ -309,7 +309,7 @@ func (c *dateLiteralFunctionClass) getFunction(ctx sessionctx.Context, args []Ex
 	if !datePattern.MatchString(str) {
 		return nil, types.ErrWrongValue.GenWithStackByArgs(types.DateStr, str)
 	}
-	tm, err := types.ParseDate(ctx.GetSessionVars().StmtCtx, str)
+	tm, err := types.ParseDate(ctx.GetSessionVars().StmtCtx.TypeCtx(),
str) if err != nil { return nil, err } @@ -597,7 +597,7 @@ func (b *builtinStringDurationTimeDiffSig) evalDuration(row chunk.Row) (d types. // calculateTimeDiff calculates interval difference of two types.Time. func calculateTimeDiff(sc *stmtctx.StatementContext, lhs, rhs types.Time) (d types.Duration, isNull bool, err error) { - d = lhs.Sub(sc, &rhs) + d = lhs.Sub(sc.TypeCtx(), &rhs) d.Duration, err = types.TruncateOverflowMySQLTime(d.Duration) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) @@ -760,7 +760,7 @@ func convertStringToDuration(sc *stmtctx.StatementContext, str string, fsp int) fsp = mathutil.Max(lenStrFsp, fsp) } } - return types.StrToDuration(sc, str, fsp) + return types.StrToDuration(sc.TypeCtx(), str, fsp) } type dateFormatFunctionClass struct { @@ -1931,7 +1931,7 @@ func (b *builtinStrToDateDateSig) evalTime(row chunk.Row) (types.Time, bool, err } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -1964,7 +1964,7 @@ func (b *builtinStrToDateDatetimeSig) evalTime(row chunk.Row) (types.Time, bool, } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -2000,7 +2000,7 @@ func (b *builtinStrToDateDurationSig) evalDuration(row chunk.Row) (types.Duratio } var t types.Time sc := b.ctx.GetSessionVars().StmtCtx - succ := t.StrToDate(sc, date, format) + succ := t.StrToDate(sc.TypeCtx(), date, format) if !succ { return types.Duration{}, true, handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())) } @@ -2185,7 +2185,7 @@ func (b *builtinCurrentTime0ArgSig) evalDuration(row chunk.Row) (types.Duration, return types.Duration{}, true, err } dur := nowTs.In(tz).Format(types.TimeFormat) - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, types.MinFsp) if err != nil { return types.Duration{}, true, err } @@ -2213,7 +2213,7 @@ func (b *builtinCurrentTime1ArgSig) evalDuration(row chunk.Row) (types.Duration, return types.Duration{}, true, err } dur := nowTs.In(tz).Format(types.TimeFSPFormat) - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, int(fsp)) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, int(fsp)) if err != nil { return types.Duration{}, true, err } @@ -2273,7 +2273,7 @@ func (b *builtinTimeSig) evalDuration(row chunk.Row) (res types.Duration, isNull fsp = tmpFsp sc := b.ctx.GetSessionVars().StmtCtx - res, _, err = types.ParseDuration(sc, expr, fsp) + res, _, err = types.ParseDuration(sc.TypeCtx(), expr, fsp) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) } @@ -2300,7 +2300,7 @@ func (c *timeLiteralFunctionClass) getFunction(ctx sessionctx.Context, args []Ex if !isDuration(str) { return nil, types.ErrWrongValue.GenWithStackByArgs(types.TimeStr, str) } - duration, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, str, types.GetFsp(str)) + duration, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), str, 
types.GetFsp(str)) if err != nil { return nil, err } @@ -2678,7 +2678,7 @@ func (b *builtinExtractDatetimeFromStringSig) evalInt(row chunk.Row) (int64, boo } sc := b.ctx.GetSessionVars().StmtCtx if types.IsClockUnit(unit) && types.IsDateUnit(unit) { - dur, _, err := types.ParseDuration(sc, dtStr, types.GetFsp(dtStr)) + dur, _, err := types.ParseDuration(sc.TypeCtx(), dtStr, types.GetFsp(dtStr)) if err != nil { return 0, true, err } @@ -2686,7 +2686,7 @@ func (b *builtinExtractDatetimeFromStringSig) evalInt(row chunk.Row) (int64, boo if err != nil { return 0, true, err } - dt, err := types.ParseDatetime(sc, dtStr) + dt, err := types.ParseDatetime(sc.TypeCtx(), dtStr) if err != nil { return res, false, nil } @@ -2774,7 +2774,7 @@ func (du *baseDateArithmetical) getDateFromString(ctx sessionctx.Context, args [ } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTime(sc, dateStr, dateTp, types.MaxFsp, nil) + date, err := types.ParseTime(sc.TypeCtx(), dateStr, dateTp, types.MaxFsp, nil) if err != nil { err = handleInvalidTimeError(ctx, err) if err != nil { @@ -2794,7 +2794,7 @@ func (du *baseDateArithmetical) getDateFromInt(ctx sessionctx.Context, args []Ex } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromInt64(sc, dateInt) + date, err := types.ParseTimeFromInt64(sc.TypeCtx(), dateInt) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2814,7 +2814,7 @@ func (du *baseDateArithmetical) getDateFromReal(ctx sessionctx.Context, args []E } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromFloat64(sc, dateReal) + date, err := types.ParseTimeFromFloat64(sc.TypeCtx(), dateReal) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2834,7 +2834,7 @@ func (du *baseDateArithmetical) getDateFromDecimal(ctx sessionctx.Context, args } sc := ctx.GetSessionVars().StmtCtx - date, err := types.ParseTimeFromDecimal(sc, dateDec) + date, err := types.ParseTimeFromDecimal(sc.TypeCtx(), dateDec) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(ctx, err) } @@ -2983,7 +2983,7 @@ func (du *baseDateArithmetical) addDate(ctx sessionctx.Context, date types.Time, } date.SetCoreTime(types.FromGoTime(goTime)) - overflow, err := types.DateTimeIsOverflow(ctx.GetSessionVars().StmtCtx, date) + overflow, err := types.DateTimeIsOverflow(ctx.GetSessionVars().StmtCtx.TypeCtx(), date) if err := handleInvalidTimeError(ctx, err); err != nil { return types.ZeroTime, true, err } @@ -3053,7 +3053,7 @@ func (du *baseDateArithmetical) vecGetDateFromInt(b *baseBuiltinFunc, input *chu continue } - date, err := types.ParseTimeFromInt64(sc, i64s[i]) + date, err := types.ParseTimeFromInt64(sc.TypeCtx(), i64s[i]) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3095,7 +3095,7 @@ func (du *baseDateArithmetical) vecGetDateFromReal(b *baseBuiltinFunc, input *ch continue } - date, err := types.ParseTimeFromFloat64(sc, f64s[i]) + date, err := types.ParseTimeFromFloat64(sc.TypeCtx(), f64s[i]) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3137,7 +3137,7 @@ func (du *baseDateArithmetical) vecGetDateFromDecimal(b *baseBuiltinFunc, input } dec := buf.GetDecimal(i) - date, err := types.ParseTimeFromDecimal(sc, dec) + date, err := types.ParseTimeFromDecimal(sc.TypeCtx(), dec) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -3184,7 +3184,7 @@ func (du *baseDateArithmetical) vecGetDateFromString(b *baseBuiltinFunc, input * dateTp = 
mysql.TypeDatetime } - date, err := types.ParseTime(sc, dateStr, dateTp, types.MaxFsp, nil) + date, err := types.ParseTime(sc.TypeCtx(), dateStr, dateTp, types.MaxFsp, nil) if err != nil { err = handleInvalidTimeError(b.ctx, err) if err != nil { @@ -4002,7 +4002,7 @@ func (b *builtinAddSubDateDurationAnySig) evalTime(row chunk.Row) (types.Time, b } sc := b.ctx.GetSessionVars().StmtCtx - t, err := d.ConvertToTime(sc, mysql.TypeDatetime) + t, err := d.ConvertToTime(sc.TypeCtx(), mysql.TypeDatetime) if err != nil { return types.ZeroTime, true, err } @@ -4355,9 +4355,9 @@ func (b *builtinTimestamp1ArgSig) evalTime(row chunk.Row) (types.Time, bool, err var tm types.Time sc := b.ctx.GetSessionVars().StmtCtx if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, s, mysql.TypeDatetime, types.GetFsp(s)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s)) } else { - tm, err = types.ParseTime(sc, s, mysql.TypeDatetime, types.GetFsp(s), nil) + tm, err = types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s), nil) } if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) @@ -4387,9 +4387,9 @@ func (b *builtinTimestamp2ArgsSig) evalTime(row chunk.Row) (types.Time, bool, er var tm types.Time sc := b.ctx.GetSessionVars().StmtCtx if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0)) } else { - tm, err = types.ParseTime(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) + tm, err = types.ParseTime(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) } if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) @@ -4406,11 +4406,11 @@ func (b *builtinTimestamp2ArgsSig) evalTime(row chunk.Row) (types.Time, bool, er if !isDuration(arg1) { return types.ZeroTime, true, nil } - duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { return types.ZeroTime, true, handleInvalidTimeError(b.ctx, err) } - tmp, err := tm.Add(sc, duration) + tmp, err := tm.Add(sc.TypeCtx(), duration) if err != nil { return types.ZeroTime, true, err } @@ -4440,7 +4440,7 @@ func (c *timestampLiteralFunctionClass) getFunction(ctx sessionctx.Context, args if !timestampPattern.MatchString(str) { return nil, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, str) } - tm, err := types.ParseTime(ctx.GetSessionVars().StmtCtx, str, mysql.TypeDatetime, types.GetFsp(str), nil) + tm, err := types.ParseTime(ctx.GetSessionVars().StmtCtx.TypeCtx(), str, mysql.TypeDatetime, types.GetFsp(str), nil) if err != nil { return nil, err } @@ -4548,13 +4548,13 @@ func isDuration(str string) bool { // strDatetimeAddDuration adds duration to datetime string, returns a string value. func strDatetimeAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (result string, isNull bool, err error) { - arg0, err := types.ParseTime(sc, d, mysql.TypeDatetime, types.MaxFsp, nil) + arg0, err := types.ParseTime(sc.TypeCtx(), d, mysql.TypeDatetime, types.MaxFsp, nil) if err != nil { // Return a warning regardless of the sql_mode, this is compatible with MySQL. 
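// The four string add/sub helpers in this region share one error convention:
// a failed parse appends a warning and yields SQL NULL ("", true, nil), while a
// failed addition is returned as a real error. With the new types.Context
// signatures the happy path reduces to this sketch (literals illustrative):
//
//	tc := sc.TypeCtx()
//	arg0, err := types.ParseTime(tc, "2023-01-01 12:00:00", mysql.TypeDatetime, types.MaxFsp, nil)
//	if err == nil {
//		dur, _, _ := types.ParseDuration(tc, "01:30:00", types.MaxFsp)
//		res, _ := arg0.Add(tc, dur.Neg()) // subtracting is adding the negated duration
//		_ = res                           // 2023-01-01 10:30:00
//	}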
sc.AppendWarning(err) return "", true, nil } - ret, err := arg0.Add(sc, arg1) + ret, err := arg0.Add(sc.TypeCtx(), arg1) if err != nil { return "", false, err } @@ -4568,7 +4568,7 @@ func strDatetimeAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDurationAddDuration adds duration to duration string, returns a string value. func strDurationAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) { - arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp) + arg0, _, err := types.ParseDuration(sc.TypeCtx(), d, types.MaxFsp) if err != nil { return "", err } @@ -4585,13 +4585,13 @@ func strDurationAddDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDatetimeSubDuration subtracts duration from datetime string, returns a string value. func strDatetimeSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (result string, isNull bool, err error) { - arg0, err := types.ParseTime(sc, d, mysql.TypeDatetime, types.MaxFsp, nil) + arg0, err := types.ParseTime(sc.TypeCtx(), d, mysql.TypeDatetime, types.MaxFsp, nil) if err != nil { // Return a warning regardless of the sql_mode, this is compatible with MySQL. sc.AppendWarning(err) return "", true, nil } - resultTime, err := arg0.Add(sc, arg1.Neg()) + resultTime, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) if err != nil { return "", false, err } @@ -4605,7 +4605,7 @@ func strDatetimeSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.D // strDurationSubDuration subtracts duration from duration string, returns a string value. func strDurationSubDuration(sc *stmtctx.StatementContext, d string, arg1 types.Duration) (string, error) { - arg0, _, err := types.ParseDuration(sc, d, types.MaxFsp) + arg0, _, err := types.ParseDuration(sc.TypeCtx(), d, types.MaxFsp) if err != nil { return "", err } @@ -4725,7 +4725,7 @@ func (b *builtinAddDatetimeAndDurationSig) evalTime(row chunk.Row) (types.Time, if isNull || err != nil { return types.ZeroDatetime, isNull, err } - result, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, arg1) + result, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), arg1) return result, err != nil, err } @@ -4754,7 +4754,7 @@ func (b *builtinAddDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo return types.ZeroDatetime, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -4762,7 +4762,7 @@ func (b *builtinAddDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo } return types.ZeroDatetime, true, err } - result, err := arg0.Add(sc, arg1) + result, err := arg0.Add(sc.TypeCtx(), arg1) return result, err != nil, err } @@ -4835,7 +4835,7 @@ func (b *builtinAddDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura return types.ZeroDuration, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -4937,7 +4937,7 @@ func (b *builtinAddStringAndStringSig) evalString(row chunk.Row) (result string, return "", isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err = types.ParseDuration(sc, arg1Str, getFsp4TimeAddSub(arg1Str)) + arg1, _, err = 
types.ParseDuration(sc.TypeCtx(), arg1Str, getFsp4TimeAddSub(arg1Str)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5020,7 +5020,7 @@ func (b *builtinAddDateAndStringSig) evalString(row chunk.Row) (string, bool, er return "", true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5286,7 +5286,7 @@ func (b *builtinMakeTimeSig) makeTime(hour int64, minute int64, second float64, second = 59 } fsp := b.tp.GetDecimal() - d, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp) + d, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%02d:%02d:%v", hour, minute, second), fsp) return d, err } @@ -5581,7 +5581,7 @@ func (b *builtinSecToTimeSig) evalDuration(row chunk.Row) (types.Duration, bool, secondDemical = float64(second) + demical var dur types.Duration - dur, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) + dur, _, err = types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) if err != nil { return types.Duration{}, err != nil, err } @@ -5678,7 +5678,7 @@ func (b *builtinSubDatetimeAndDurationSig) evalTime(row chunk.Row) (types.Time, return types.ZeroDatetime, isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - result, err := arg0.Add(sc, arg1.Neg()) + result, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) return result, err != nil, err } @@ -5707,7 +5707,7 @@ func (b *builtinSubDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo return types.ZeroDatetime, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5715,7 +5715,7 @@ func (b *builtinSubDatetimeAndStringSig) evalTime(row chunk.Row) (types.Time, bo } return types.ZeroDatetime, true, err } - result, err := arg0.Add(sc, arg1.Neg()) + result, err := arg0.Add(sc.TypeCtx(), arg1.Neg()) return result, err != nil, err } @@ -5806,7 +5806,7 @@ func (b *builtinSubStringAndStringSig) evalString(row chunk.Row) (result string, return "", isNull, err } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err = types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err = types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5898,7 +5898,7 @@ func (b *builtinSubDurationAndStringSig) evalDuration(row chunk.Row) (types.Dura return types.ZeroDuration, true, nil } sc := b.ctx.GetSessionVars().StmtCtx - arg1, _, err := types.ParseDuration(sc, s, types.GetFsp(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, types.GetFsp(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -5976,7 +5976,7 @@ func (b *builtinSubDateAndStringSig) evalString(row chunk.Row) (string, bool, er return "", true, nil } sc := b.ctx.GetSessionVars().StmtCtx 
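// Every ParseDuration call site in these hunks changes the same way: the first
// parameter narrows from *stmtctx.StatementContext to types.Context, while the
// ErrTruncatedWrongVal handling stays put. The recurring shape, sketched with an
// illustrative literal:
//
//	dur, _, err := types.ParseDuration(sc.TypeCtx(), "12:34:56.789", types.GetFsp("12:34:56.789"))
//	if err != nil && terror.ErrorEqual(err, types.ErrTruncatedWrongVal) {
//		sc.AppendWarning(err) // downgrade truncation to a warning, as below
//	}
//	_ = dur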
- arg1, _, err := types.ParseDuration(sc, s, getFsp4TimeAddSub(s)) + arg1, _, err := types.ParseDuration(sc.TypeCtx(), s, getFsp4TimeAddSub(s)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -6238,7 +6238,7 @@ func (b *builtinTimestampAddSig) evalString(row chunk.Row) (string, bool, error) fsp = types.MaxFsp } r := types.NewTime(types.FromGoTime(tb), b.resolveType(arg.Type(), unit), fsp) - if err = r.Check(b.ctx.GetSessionVars().StmtCtx); err != nil { + if err = r.Check(b.ctx.GetSessionVars().StmtCtx.TypeCtx()); err != nil { return "", true, handleInvalidTimeError(b.ctx, err) } return r.String(), false, nil @@ -6403,7 +6403,7 @@ func (b *builtinUTCTimeWithoutArgSig) evalDuration(row chunk.Row) (types.Duratio if err != nil { return types.Duration{}, true, err } - v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), 0) + v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFormat), 0) return v, false, err } @@ -6434,7 +6434,7 @@ func (b *builtinUTCTimeWithArgSig) evalDuration(row chunk.Row) (types.Duration, if err != nil { return types.Duration{}, true, err } - v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFSPFormat), int(fsp)) + v, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFSPFormat), int(fsp)) return v, false, err } diff --git a/pkg/expression/builtin_time_test.go b/pkg/expression/builtin_time_test.go index 85c175617414a..f605647c711eb 100644 --- a/pkg/expression/builtin_time_test.go +++ b/pkg/expression/builtin_time_test.go @@ -421,7 +421,7 @@ func TestDate(t *testing.T) { func TestMonthName(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected string @@ -457,7 +457,7 @@ func TestMonthName(t *testing.T) { func TestDayName(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected string @@ -495,7 +495,7 @@ func TestDayName(t *testing.T) { func TestDayOfWeek(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -531,7 +531,7 @@ func TestDayOfWeek(t *testing.T) { func TestDayOfMonth(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -567,7 +567,7 @@ func TestDayOfMonth(t *testing.T) { func TestDayOfYear(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) cases := []struct { args interface{} expected int64 @@ -964,7 +964,7 @@ func TestAddTimeSig(t *testing.T) { {"-110:00:00", "1 02:00:00", "-84:00:00"}, } for _, c := range tbl { - dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input)) + dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), c.Input, types.GetFsp(c.Input)) require.NoError(t, err) tmpInput := types.NewDurationDatum(dur) 
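// types.GetFsp, used in the ParseDuration call above, derives the
// fractional-second precision from the literal itself: the digit count after
// the decimal point, capped at types.MaxFsp (6). Assumed example values:
//
//	types.GetFsp("01:00:00")         // 0
//	types.GetFsp("01:00:00.123")     // 3
//	types.GetFsp("01:00:00.1234567") // capped at 6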
tmpInputDuration := types.NewStringDatum(c.InputDuration) @@ -1065,7 +1065,7 @@ func TestSubTimeSig(t *testing.T) { {"235959", "00:00:01", "23:59:58"}, } for _, c := range tbl { - dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx, c.Input, types.GetFsp(c.Input)) + dur, _, err := types.ParseDuration(ctx.GetSessionVars().StmtCtx.TypeCtx(), c.Input, types.GetFsp(c.Input)) require.NoError(t, err) tmpInput := types.NewDurationDatum(dur) tmpInputDuration := types.NewStringDatum(c.InputDuration) @@ -1192,7 +1192,7 @@ func convertToTimeWithFsp(sc *stmtctx.StatementContext, arg types.Datum, tp byte f := types.NewFieldType(tp) f.SetDecimal(fsp) - d, err = arg.ConvertTo(sc, f) + d, err = arg.ConvertTo(sc.TypeCtx(), f) if err != nil { d.SetNull() return d, err @@ -1613,7 +1613,7 @@ func TestDateDiff(t *testing.T) { func TestTimeDiff(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) // Test cases from https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_timediff tests := []struct { args []interface{} @@ -1717,7 +1717,7 @@ func TestWeekWithoutModeSig(t *testing.T) { func TestYearWeek(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) // Test cases from https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_yearweek tests := []struct { t string @@ -1781,8 +1781,7 @@ func TestTimestampDiff(t *testing.T) { } sc := ctx.GetSessionVars().StmtCtx - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true).WithIgnoreZeroInDate(true)) resetStmtContext(ctx) f, err := fc.getFunction(ctx, datumsToConstants([]types.Datum{types.NewStringDatum("DAY"), types.NewStringDatum("2017-01-00"), @@ -1843,7 +1842,7 @@ func TestUnixTimestamp(t *testing.T) { // Set the time_zone variable, because UnixTimestamp() result depends on it. 
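// The boolean field sc.IgnoreZeroInDate is retired throughout these tests;
// zero-in-date tolerance now lives in the statement's type flags, and each
// With* builder returns a new flag value, so several options compose in a
// single SetTypeFlags call, as TestTimestampDiff above now does:
//
//	sc := ctx.GetSessionVars().StmtCtx
//	sc.SetTypeFlags(sc.TypeFlags().
//		WithIgnoreZeroInDate(true).
//		WithIgnoreTruncateErr(true))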
ctx.GetSessionVars().TimeZone = time.UTC - ctx.GetSessionVars().StmtCtx.IgnoreZeroInDate = true + ctx.GetSessionVars().StmtCtx.SetTypeFlags(ctx.GetSessionVars().StmtCtx.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { inputDecimal int input types.Datum @@ -2161,7 +2160,7 @@ func TestDateArithFuncs(t *testing.T) { }, } for _, tt := range testDurations { - dur, _, ok, err := types.StrToDuration(nil, tt.dur, tt.fsp) + dur, _, ok, err := types.StrToDuration(types.DefaultStmtNoWarningContext, tt.dur, tt.fsp) require.NoError(t, err) require.True(t, ok) args = types.MakeDatums(dur, tt.format, tt.unit) @@ -2400,7 +2399,7 @@ func TestMakeTime(t *testing.T) { func TestQuarter(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { t string expect int64 @@ -2479,7 +2478,7 @@ func TestGetFormat(t *testing.T) { func TestToSeconds(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { param interface{} expect int64 @@ -2522,7 +2521,7 @@ func TestToSeconds(t *testing.T) { func TestToDays(t *testing.T) { ctx := createContext(t) sc := ctx.GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) tests := []struct { param interface{} expect int64 @@ -2951,7 +2950,7 @@ func TestLastDay(t *testing.T) { } var timeData types.Time - timeData.StrToDate(ctx.GetSessionVars().StmtCtx, "202010", "%Y%m") + timeData.StrToDate(ctx.GetSessionVars().StmtCtx.TypeCtx(), "202010", "%Y%m") testsNull := []struct { param interface{} isNilNoZeroDate bool @@ -2996,7 +2995,7 @@ func TestWithTimeZone(t *testing.T) { return result } durationToGoTime := func(d types.Datum, loc *time.Location) time.Time { - t, _ := d.GetMysqlDuration().ConvertToTime(sv.StmtCtx, mysql.TypeDatetime) + t, _ := d.GetMysqlDuration().ConvertToTime(sv.StmtCtx.TypeCtx(), mysql.TypeDatetime) result, _ := t.GoTime(sv.TimeZone) return result } diff --git a/pkg/expression/builtin_time_vec.go b/pkg/expression/builtin_time_vec.go index 0ff70bf96e7c6..df66101068f62 100644 --- a/pkg/expression/builtin_time_vec.go +++ b/pkg/expression/builtin_time_vec.go @@ -421,7 +421,7 @@ func (b *builtinUTCTimeWithArgSig) vecEvalDuration(input *chunk.Chunk, result *c if fsp < int64(types.MinFsp) { return errors.Errorf("Invalid negative %d specified, must in [0, 6]", fsp) } - res, _, err := types.ParseDuration(stmtCtx, utc, int(fsp)) + res, _, err := types.ParseDuration(stmtCtx.TypeCtx(), utc, int(fsp)) if err != nil { return err } @@ -726,7 +726,7 @@ func (b *builtinStrToDateDateSig) vecEvalTime(input *chunk.Chunk, result *chunk. 
continue } var t types.Time - succ := t.StrToDate(sc, bufStrings.GetString(i), bufFormats.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), bufStrings.GetString(i), bufFormats.GetString(i)) if !succ { if err := handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1138,7 +1138,7 @@ func (b *builtinStrToDateDurationSig) vecEvalDuration(input *chunk.Chunk, result continue } var t types.Time - succ := t.StrToDate(sc, bufStrings.GetString(i), bufFormats.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), bufStrings.GetString(i), bufFormats.GetString(i)) if !succ { if err := handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1500,7 +1500,7 @@ func (b *builtinStrToDateDatetimeSig) vecEvalTime(input *chunk.Chunk, result *ch continue } var t types.Time - succ := t.StrToDate(sc, dateBuf.GetString(i), formatBuf.GetString(i)) + succ := t.StrToDate(sc.TypeCtx(), dateBuf.GetString(i), formatBuf.GetString(i)) if !succ { if err = handleInvalidTimeError(b.ctx, types.ErrWrongValue.GenWithStackByArgs(types.DateTimeStr, t.String())); err != nil { return err @@ -1744,7 +1744,7 @@ func (b *builtinTimestampAddSig) vecEvalString(input *chunk.Chunk, result *chunk fsp = types.MaxFsp } r := types.NewTime(types.FromGoTime(tb), b.resolveType(arg.Type(), unit), fsp) - if err = r.Check(b.ctx.GetSessionVars().StmtCtx); err != nil { + if err = r.Check(b.ctx.GetSessionVars().StmtCtx.TypeCtx()); err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err } @@ -1937,7 +1937,7 @@ func (b *builtinSecToTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk. second = seconds % 60 } secondDemical := float64(second) + demical - duration, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) + duration, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), fmt.Sprintf("%s%02d:%02d:%s", negative, hour, minute, strconv.FormatFloat(secondDemical, 'f', -1, 64)), b.tp.GetDecimal()) if err != nil { return err } @@ -1958,7 +1958,7 @@ func (b *builtinUTCTimeWithoutArgSig) vecEvalDuration(input *chunk.Chunk, result if err != nil { return err } - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), nowTs.UTC().Format(types.TimeFormat), types.DefaultFsp) if err != nil { return err } @@ -2361,7 +2361,7 @@ func (b *builtinCurrentTime0ArgSig) vecEvalDuration(input *chunk.Chunk, result * } tz := b.ctx.GetSessionVars().Location() dur := nowTs.In(tz).Format(types.TimeFormat) - res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx, dur, types.MinFsp) + res, _, err := types.ParseDuration(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), dur, types.MinFsp) if err != nil { return err } @@ -2409,7 +2409,7 @@ func (b *builtinTimeSig) vecEvalDuration(input *chunk.Chunk, result *chunk.Colum } fsp = tmpFsp - res, _, err := types.ParseDuration(sc, expr, fsp) + res, _, err := types.ParseDuration(sc.TypeCtx(), expr, fsp) if types.ErrTruncatedWrongVal.Equal(err) { err = sc.HandleTruncate(err) } @@ -2555,7 +2555,7 @@ func (b *builtinCurrentTime1ArgSig) vecEvalDuration(input *chunk.Chunk, result * result.ResizeGoDuration(n, false) durations := result.GoDurations() for i := 0; i < n; i++ { - 
res, _, err := types.ParseDuration(stmtCtx, dur, int(i64s[i])) + res, _, err := types.ParseDuration(stmtCtx.TypeCtx(), dur, int(i64s[i])) if err != nil { return err } @@ -2658,9 +2658,9 @@ func (b *builtinTimestamp1ArgSig) vecEvalTime(input *chunk.Chunk, result *chunk. s := buf.GetString(i) if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, s, mysql.TypeDatetime, types.GetFsp(s)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s)) } else { - tm, err = types.ParseTime(sc, s, mysql.TypeDatetime, types.GetFsp(s), nil) + tm, err = types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.GetFsp(s), nil) } if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { @@ -2711,9 +2711,9 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk arg1 := buf1.GetString(i) if b.isFloat { - tm, err = types.ParseTimeFromFloatString(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0)) + tm, err = types.ParseTimeFromFloatString(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0)) } else { - tm, err = types.ParseTime(sc, arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) + tm, err = types.ParseTime(sc.TypeCtx(), arg0, mysql.TypeDatetime, types.GetFsp(arg0), nil) } if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { @@ -2734,7 +2734,7 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk continue } - duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if err = handleInvalidTimeError(b.ctx, err); err != nil { return err @@ -2742,7 +2742,7 @@ func (b *builtinTimestamp2ArgsSig) vecEvalTime(input *chunk.Chunk, result *chunk result.SetNull(i, true) continue } - tmp, err := tm.Add(sc, duration) + tmp, err := tm.Add(sc.TypeCtx(), duration) if err != nil { return err } @@ -2929,7 +2929,7 @@ func (b *builtinAddSubDateDurationAnySig) vecEvalTime(input *chunk.Chunk, result continue } iterDuration.Duration = goDurations[i] - t, err := iterDuration.ConvertToTime(sc, mysql.TypeDatetime) + t, err := iterDuration.ConvertToTime(sc.TypeCtx(), mysql.TypeDatetime) if err != nil { result.SetNull(i, true) } diff --git a/pkg/expression/builtin_time_vec_generated.go b/pkg/expression/builtin_time_vec_generated.go index a40cdb3909554..9e29d8f0abe97 100644 --- a/pkg/expression/builtin_time_vec_generated.go +++ b/pkg/expression/builtin_time_vec_generated.go @@ -62,7 +62,7 @@ func (b *builtinAddDatetimeAndDurationSig) vecEvalTime(input *chunk.Chunk, resul // calculate - output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, types.Duration{Duration: arg1, Fsp: -1}) + output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), types.Duration{Duration: arg1, Fsp: -1}) if err != nil { return err @@ -122,7 +122,7 @@ func (b *builtinAddDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -132,7 +132,7 @@ func (b *builtinAddDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result return err } - output, err := arg0.Add(sc, arg1Duration) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration) if err != nil { return err @@ -248,7 +248,7 @@ func (b 
*builtinAddDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -406,7 +406,7 @@ func (b *builtinAddStringAndStringSig) vecEvalString(input *chunk.Chunk, result // calculate sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -566,7 +566,7 @@ func (b *builtinAddDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -677,7 +677,7 @@ func (b *builtinSubDatetimeAndDurationSig) vecEvalTime(input *chunk.Chunk, resul sc := b.ctx.GetSessionVars().StmtCtx arg1Duration := types.Duration{Duration: arg1, Fsp: -1} - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) if err != nil { return err @@ -737,7 +737,7 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -746,7 +746,7 @@ func (b *builtinSubDatetimeAndStringSig) vecEvalTime(input *chunk.Chunk, result } return err } - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) if err != nil { return err @@ -862,7 +862,7 @@ func (b *builtinSubDurationAndStringSig) vecEvalDuration(input *chunk.Chunk, res continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -1020,7 +1020,7 @@ func (b *builtinSubStringAndStringSig) vecEvalString(input *chunk.Chunk, result // calculate sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -1180,7 +1180,7 @@ func (b *builtinSubDateAndStringSig) vecEvalString(input *chunk.Chunk, result *c continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, getFsp4TimeAddSub(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, getFsp4TimeAddSub(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) diff --git a/pkg/expression/builtin_vectorized_test.go b/pkg/expression/builtin_vectorized_test.go index 
d5921a2ffc43b..f50338f8a58eb 100644 --- a/pkg/expression/builtin_vectorized_test.go +++ b/pkg/expression/builtin_vectorized_test.go @@ -279,7 +279,7 @@ func (p *mockBuiltinDouble) vecEvalTime(input *chunk.Chunk, result *chunk.Column if err != nil { return err } - if ts[i], err = ts[i].Add(p.ctx.GetSessionVars().StmtCtx, d); err != nil { + if ts[i], err = ts[i].Add(p.ctx.GetSessionVars().StmtCtx.TypeCtx(), d); err != nil { return err } } @@ -371,7 +371,7 @@ func (p *mockBuiltinDouble) evalTime(row chunk.Row) (types.Time, bool, error) { if err != nil { return types.ZeroTime, false, err } - v, err = v.Add(p.ctx.GetSessionVars().StmtCtx, d) + v, err = v.Add(p.ctx.GetSessionVars().StmtCtx.TypeCtx(), d) return v, isNull, err } @@ -512,7 +512,7 @@ func checkVecEval(t *testing.T, eType types.EvalType, sel []int, result *chunk.C tt := types.NewTime(gt, convertETType(eType), 0) d, err := tt.ConvertToDuration() require.NoError(t, err) - v, err := tt.Add(mock.NewContext().GetSessionVars().StmtCtx, d) + v, err := tt.Add(mock.NewContext().GetSessionVars().StmtCtx.TypeCtx(), d) require.NoError(t, err) require.Equal(t, 0, v.Compare(ds[i])) } diff --git a/pkg/expression/column.go b/pkg/expression/column.go index e7e8af727a6c1..bfd1d051ea577 100644 --- a/pkg/expression/column.go +++ b/pkg/expression/column.go @@ -100,7 +100,7 @@ func (col *CorrelatedColumn) EvalInt(ctx sessionctx.Context, row chunk.Row) (int return 0, true, nil } if col.GetType().Hybrid() { - res, err := col.Data.ToInt64(ctx.GetSessionVars().StmtCtx) + res, err := col.Data.ToInt64(ctx.GetSessionVars().StmtCtx.TypeCtx()) return res, err != nil, err } return col.Data.GetInt64(), false, nil @@ -425,7 +425,7 @@ func (col *Column) EvalInt(ctx sessionctx.Context, row chunk.Row) (int64, bool, val, err := val.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx.TypeCtx()) return int64(val), err != nil, err } - res, err := val.ToInt64(ctx.GetSessionVars().StmtCtx) + res, err := val.ToInt64(ctx.GetSessionVars().StmtCtx.TypeCtx()) return res, err != nil, err } if row.IsNull(col.Index) { @@ -703,7 +703,7 @@ func (col *Column) SupportReverseEval() bool { // ReverseEval evaluates the only one column value with given function result. func (col *Column) ReverseEval(sc *stmtctx.StatementContext, res types.Datum, rType types.RoundingType) (val types.Datum, err error) { - return types.ChangeReverseResultByUpperLowerBound(sc, col.RetType, res, rType) + return types.ChangeReverseResultByUpperLowerBound(sc.TypeCtx(), col.RetType, res, rType) } // Coercibility returns the coercibility value which is used to check collations. 
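The explain.go and expression.go hunks further below add the one genuinely new API in this part of the diff: Expression.ExplainNormalizedInfo4InList, which collapses IN-list arguments to a fixed placeholder so that plan digests stop varying with the number of values in an IN-list. A hedged sketch of a caller, where conds is an assumed []expression.Expression holding a single `in(test.t.a, 1, 2, 3)` filter; the output strings follow from the ScalarFunction, Column, and Constant implementations shown below:

	// Digest input before: every IN value keeps its own "?" placeholder.
	before := expression.SortedExplainNormalizedExpressionList(conds) // []byte("in(test.t.a, ?, ?, ?)")
	// Digest input after: the whole IN-list is folded away.
	after := expression.SortedExplainExpressionListIgnoreInlist(conds) // []byte("in(...)")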
diff --git a/pkg/expression/constant.go b/pkg/expression/constant.go
index 74f2504f8beaf..91703c3c29595 100644
--- a/pkg/expression/constant.go
+++ b/pkg/expression/constant.go
@@ -250,7 +250,7 @@ func (c *Constant) Eval(row chunk.Row) (types.Datum, error) {
 	sf, sfOk := c.DeferredExpr.(*ScalarFunction)
 	if sfOk {
 		if dt.Kind() != types.KindMysqlDecimal {
-			val, err := dt.ConvertTo(sf.GetCtx().GetSessionVars().StmtCtx, c.RetType)
+			val, err := dt.ConvertTo(sf.GetCtx().GetSessionVars().StmtCtx.TypeCtx(), c.RetType)
 			if err != nil {
 				return dt, err
 			}
@@ -281,7 +281,7 @@ func (c *Constant) EvalInt(ctx sessionctx.Context, row chunk.Row) (int64, bool,
 		val, err := dt.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx.TypeCtx())
 		return int64(val), err != nil, err
 	} else if c.GetType().Hybrid() || dt.Kind() == types.KindString {
-		res, err := dt.ToInt64(ctx.GetSessionVars().StmtCtx)
+		res, err := dt.ToInt64(ctx.GetSessionVars().StmtCtx.TypeCtx())
 		return res, false, err
 	} else if dt.Kind() == types.KindMysqlBit {
 		uintVal, err := dt.GetBinaryLiteral().ToInt(ctx.GetSessionVars().StmtCtx.TypeCtx())
@@ -412,7 +412,7 @@ func (c *Constant) Equal(ctx sessionctx.Context, b Expression) bool {
 	if err1 != nil || err2 != nil {
 		return false
 	}
-	con, err := c.Value.Compare(ctx.GetSessionVars().StmtCtx, &y.Value, collate.GetBinaryCollator())
+	con, err := c.Value.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &y.Value, collate.GetBinaryCollator())
 	if err != nil || con != 0 {
 		return false
 	}
diff --git a/pkg/expression/constant_propagation.go b/pkg/expression/constant_propagation.go
index 5bb233a3127f2..5b19da65c55e9 100644
--- a/pkg/expression/constant_propagation.go
+++ b/pkg/expression/constant_propagation.go
@@ -62,7 +62,7 @@ func (s *basePropConstSolver) tryToUpdateEQList(col *Column, con *Constant) (boo
 	id := s.getColID(col)
 	oldCon := s.eqList[id]
 	if oldCon != nil {
-		res, err := oldCon.Value.Compare(s.ctx.GetSessionVars().StmtCtx, &con.Value, collate.GetCollator(col.GetType().GetCollate()))
+		res, err := oldCon.Value.Compare(s.ctx.GetSessionVars().StmtCtx.TypeCtx(), &con.Value, collate.GetCollator(col.GetType().GetCollate()))
 		return false, res != 0 || err != nil
 	}
 	s.eqList[id] = con
diff --git a/pkg/expression/distsql_builtin_test.go b/pkg/expression/distsql_builtin_test.go
index ef4a2ff34567b..b12fb40f8fc08 100644
--- a/pkg/expression/distsql_builtin_test.go
+++ b/pkg/expression/distsql_builtin_test.go
@@ -785,7 +785,7 @@ func TestEval(t *testing.T) {
 	result, err := expr.Eval(row)
 	require.NoError(t, err)
 	require.Equal(t, tt.result.Kind(), result.Kind())
-	cmp, err := result.Compare(sc, &tt.result, collate.GetCollator(fieldTps[0].GetCollate()))
+	cmp, err := result.Compare(sc.TypeCtx(), &tt.result, collate.GetCollator(fieldTps[0].GetCollate()))
 	require.NoError(t, err)
 	require.Equal(t, 0, cmp)
 }
@@ -963,7 +963,7 @@ func newDuration(dur time.Duration) types.Duration {
 }
 func newDateTime(t *testing.T, s string) types.Time {
-	tt, err := types.ParseDate(nil, s)
+	tt, err := types.ParseDate(types.DefaultStmtNoWarningContext, s)
 	require.NoError(t, err)
 	return tt
 }
diff --git a/pkg/expression/evaluator_test.go b/pkg/expression/evaluator_test.go
index 55c10319ab636..24ecee4186b92 100644
--- a/pkg/expression/evaluator_test.go
+++ b/pkg/expression/evaluator_test.go
@@ -15,7 +15,6 @@ package expression
 import (
-	"sync/atomic"
 	"testing"
 	"time"
@@ -26,6 +25,7 @@ import (
 	"github.com/pingcap/tidb/pkg/types"
 	"github.com/pingcap/tidb/pkg/util/chunk"
 	"github.com/pingcap/tidb/pkg/util/collate"
+	"github.com/pingcap/tidb/pkg/util/sqlkiller"
 	"github.com/stretchr/testify/require"
 )
@@ -152,7 +152,7 @@ func TestSleep(t *testing.T) {
 	start = time.Now()
 	go func() {
 		time.Sleep(1 * time.Second)
-		atomic.CompareAndSwapUint32(&ctx.GetSessionVars().Killed, 0, 1)
+		ctx.GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted)
 	}()
 	ret, isNull, err = f.evalInt(chunk.Row{})
 	sub = time.Since(start)
@@ -568,7 +568,7 @@ func TestUnaryOp(t *testing.T) {
 	require.NoError(t, err)
 	expect := types.NewDatum(tt.result)
-	ret, err := result.Compare(ctx.GetSessionVars().StmtCtx, &expect, collate.GetBinaryCollator())
+	ret, err := result.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &expect, collate.GetBinaryCollator())
 	require.NoError(t, err)
 	require.Equalf(t, 0, ret, "%v %s", tt.arg, tt.op)
 }
diff --git a/pkg/expression/explain.go b/pkg/expression/explain.go
index 523f4d89de59c..2591f5e18b743 100644
--- a/pkg/expression/explain.go
+++ b/pkg/expression/explain.go
@@ -65,6 +65,31 @@ func (expr *ScalarFunction) ExplainNormalizedInfo() string {
 	return expr.explainInfo(true)
 }
+// ExplainNormalizedInfo4InList implements the Expression interface.
+func (expr *ScalarFunction) ExplainNormalizedInfo4InList() string {
+	var buffer bytes.Buffer
+	fmt.Fprintf(&buffer, "%s(", expr.FuncName.L)
+	switch expr.FuncName.L {
+	case ast.Cast:
+		for _, arg := range expr.GetArgs() {
+			buffer.WriteString(arg.ExplainNormalizedInfo4InList())
+			buffer.WriteString(", ")
+			buffer.WriteString(expr.RetType.String())
+		}
+	case ast.In:
+		buffer.WriteString("...")
+	default:
+		for i, arg := range expr.GetArgs() {
+			buffer.WriteString(arg.ExplainNormalizedInfo4InList())
+			if i+1 < len(expr.GetArgs()) {
+				buffer.WriteString(", ")
+			}
+		}
+	}
+	buffer.WriteString(")")
+	return buffer.String()
+}
+
 // ExplainInfo implements the Expression interface.
 func (col *Column) ExplainInfo() string {
 	return col.String()
@@ -78,6 +103,14 @@ func (col *Column) ExplainNormalizedInfo() string {
 	return "?"
 }
+// ExplainNormalizedInfo4InList implements the Expression interface.
+func (col *Column) ExplainNormalizedInfo4InList() string {
+	if col.OrigName != "" {
+		return col.OrigName
+	}
+	return "?"
+}
+
 // ExplainInfo implements the Expression interface.
 func (expr *Constant) ExplainInfo() string {
 	dt, err := expr.Eval(chunk.Row{})
@@ -92,6 +125,11 @@ func (expr *Constant) ExplainNormalizedInfo() string {
 	return "?"
 }
+// ExplainNormalizedInfo4InList implements the Expression interface.
+func (expr *Constant) ExplainNormalizedInfo4InList() string {
+	return "?"
+}
+
 func (expr *Constant) format(dt types.Datum) string {
 	switch dt.Kind() {
 	case types.KindNull:
@@ -142,14 +180,21 @@ func ExplainExpressionList(exprs []Expression, schema *Schema) string {
 // In some scenarios, the expr's order may not be stable when executing multiple times.
 // So we add a sort to make its explain result stable.
 func SortedExplainExpressionList(exprs []Expression) []byte {
-	return sortedExplainExpressionList(exprs, false)
+	return sortedExplainExpressionList(exprs, false, false)
+}
+
+// SortedExplainExpressionListIgnoreInlist generates explain information for a list of expressions in order.
+func SortedExplainExpressionListIgnoreInlist(exprs []Expression) []byte {
+	return sortedExplainExpressionList(exprs, false, true)
 }

-func sortedExplainExpressionList(exprs []Expression, normalized bool) []byte {
+func sortedExplainExpressionList(exprs []Expression, normalized bool, ignoreInlist bool) []byte {
 	buffer := bytes.NewBufferString("")
 	exprInfos := make([]string, 0, len(exprs))
 	for _, expr := range exprs {
-		if normalized {
+		if ignoreInlist {
+			exprInfos = append(exprInfos, expr.ExplainNormalizedInfo4InList())
+		} else if normalized {
 			exprInfos = append(exprInfos, expr.ExplainNormalizedInfo())
 		} else {
 			exprInfos = append(exprInfos, expr.ExplainInfo())
@@ -167,7 +212,7 @@
 // SortedExplainNormalizedExpressionList is same like SortedExplainExpressionList, but use for generating normalized information.
 func SortedExplainNormalizedExpressionList(exprs []Expression) []byte {
-	return sortedExplainExpressionList(exprs, true)
+	return sortedExplainExpressionList(exprs, true, false)
 }

 // SortedExplainNormalizedScalarFuncList is same like SortedExplainExpressionList, but use for generating normalized information.
@@ -176,7 +221,7 @@ func SortedExplainNormalizedScalarFuncList(exprs []*ScalarFunction) []byte {
 	for i := range exprs {
 		expressions[i] = exprs[i]
 	}
-	return sortedExplainExpressionList(expressions, true)
+	return sortedExplainExpressionList(expressions, true, false)
 }

 // ExplainColumnList generates explain information for a list of columns.
diff --git a/pkg/expression/expression.go b/pkg/expression/expression.go
index 4f0bb5843e68f..411c25fbafb90 100644
--- a/pkg/expression/expression.go
+++ b/pkg/expression/expression.go
@@ -183,6 +183,9 @@ type Expression interface {
 	// ExplainNormalizedInfo returns operator normalized information for generating digest.
 	ExplainNormalizedInfo() string

+	// ExplainNormalizedInfo4InList returns operator normalized information for plan digest.
+	ExplainNormalizedInfo4InList() string
+
 	// HashCode creates the hashcode for expression which can be used to identify itself from other expression.
// It generated as the following: // Constant: ConstantFlag+encoded value diff --git a/pkg/expression/generator/time_vec.go b/pkg/expression/generator/time_vec.go index 39f11261056fd..a7e93da334b02 100644 --- a/pkg/expression/generator/time_vec.go +++ b/pkg/expression/generator/time_vec.go @@ -63,7 +63,7 @@ import ( continue }{{ end }} sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, {{if eq .Output.TypeName "String"}}getFsp4TimeAddSub{{else}}types.GetFsp{{end}}(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -171,11 +171,11 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result // calculate {{ if or (eq .SigName "builtinAddDatetimeAndDurationSig") (eq .SigName "builtinSubDatetimeAndDurationSig") }} {{ if eq $.FuncName "AddTime" }} - output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx, types.Duration{Duration: arg1, Fsp: -1}) + output, err := arg0.Add(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), types.Duration{Duration: arg1, Fsp: -1}) {{ else }} sc := b.ctx.GetSessionVars().StmtCtx arg1Duration := types.Duration{Duration: arg1, Fsp: -1} - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) {{ end }} if err != nil { return err @@ -184,14 +184,14 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result {{ else if or (eq .SigName "builtinAddDatetimeAndStringSig") (eq .SigName "builtinSubDatetimeAndStringSig") }} {{ if eq $.FuncName "AddTime" }} {{ template "ConvertStringToDuration" . }} - output, err := arg0.Add(sc, arg1Duration) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration) {{ else }} if !isDuration(arg1) { result.SetNull(i, true) // fixed: true continue } sc := b.ctx.GetSessionVars().StmtCtx - arg1Duration, _, err := types.ParseDuration(sc, arg1, types.GetFsp(arg1)) + arg1Duration, _, err := types.ParseDuration(sc.TypeCtx(), arg1, types.GetFsp(arg1)) if err != nil { if terror.ErrorEqual(err, types.ErrTruncatedWrongVal) { sc.AppendWarning(err) @@ -200,7 +200,7 @@ func (b *{{.SigName}}) vecEval{{ .Output.TypeName }}(input *chunk.Chunk, result } return err } - output, err := arg0.Add(sc, arg1Duration.Neg()) + output, err := arg0.Add(sc.TypeCtx(), arg1Duration.Neg()) {{ end }} if err != nil { return err diff --git a/pkg/expression/helper.go b/pkg/expression/helper.go index d0fce4ffb2597..0643db7f850e9 100644 --- a/pkg/expression/helper.go +++ b/pkg/expression/helper.go @@ -99,10 +99,10 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli return d, err } } else if lowerX == types.ZeroDatetimeStr { - value, err = types.ParseTimeFromNum(sc, 0, tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), 0, tp, fsp) terror.Log(err) } else { - value, err = types.ParseTime(sc, x, tp, fsp, explicitTz) + value, err = types.ParseTime(sc.TypeCtx(), x, tp, fsp, explicitTz) if err != nil { return d, err } @@ -110,12 +110,12 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli case *driver.ValueExpr: switch x.Kind() { case types.KindString: - value, err = types.ParseTime(sc, x.GetString(), tp, fsp, nil) + value, err = types.ParseTime(sc.TypeCtx(), x.GetString(), tp, fsp, nil) if err != nil { return d, err } case types.KindInt64: - value, err = types.ParseTimeFromNum(sc, 
x.GetInt64(), tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), x.GetInt64(), tp, fsp) if err != nil { return d, err } @@ -137,12 +137,12 @@ func GetTimeValue(ctx sessionctx.Context, v interface{}, tp byte, fsp int, expli return d, err } ft := types.NewFieldType(mysql.TypeLonglong) - xval, err := v.ConvertTo(ctx.GetSessionVars().StmtCtx, ft) + xval, err := v.ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), ft) if err != nil { return d, err } - value, err = types.ParseTimeFromNum(sc, xval.GetInt64(), tp, fsp) + value, err = types.ParseTimeFromNum(sc.TypeCtx(), xval.GetInt64(), tp, fsp) if err != nil { return d, err } diff --git a/pkg/expression/integration_test/integration_test.go b/pkg/expression/integration_test/integration_test.go index fa40790a7b78d..2c78919fdc2ef 100644 --- a/pkg/expression/integration_test/integration_test.go +++ b/pkg/expression/integration_test/integration_test.go @@ -422,7 +422,7 @@ func TestFilterExtractFromDNF(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, "error %v, for resolve name, expr %s", err, tt.exprStr) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err, "error %v, for build plan, expr %s", err, tt.exprStr) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) diff --git a/pkg/expression/typeinfer_test.go b/pkg/expression/typeinfer_test.go index f2cff3c08f9fc..8166f7488ddd4 100644 --- a/pkg/expression/typeinfer_test.go +++ b/pkg/expression/typeinfer_test.go @@ -129,7 +129,7 @@ func TestInferType(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmt, plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, comment) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmt, ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmt, ret.InfoSchema) require.NoError(t, err, comment) tp := p.Schema().Columns[0].RetType require.Equal(t, tt.tp, tp.GetType(), comment) diff --git a/pkg/expression/util_test.go b/pkg/expression/util_test.go index a275871663dfb..0f4d70fa88bdc 100644 --- a/pkg/expression/util_test.go +++ b/pkg/expression/util_test.go @@ -580,6 +580,7 @@ func (m *MockExpr) resolveIndicesByVirtualExpr(schema *Schema) bool func (m *MockExpr) RemapColumn(_ map[int64]*Column) (Expression, error) { return m, nil } func (m *MockExpr) ExplainInfo() string { return "" } func (m *MockExpr) ExplainNormalizedInfo() string { return "" } +func (m *MockExpr) ExplainNormalizedInfo4InList() string { return "" } func (m *MockExpr) HashCode(sc *stmtctx.StatementContext) []byte { return nil } func (m *MockExpr) Vectorized() bool { return false } func (m *MockExpr) SupportReverseEval() bool { return false } diff --git a/pkg/infoschema/BUILD.bazel b/pkg/infoschema/BUILD.bazel index a1984931d77b8..0e62ee3953ca7 100644 --- a/pkg/infoschema/BUILD.bazel +++ b/pkg/infoschema/BUILD.bazel @@ -42,7 +42,6 @@ go_library( "//pkg/util/domainutil", "//pkg/util/execdetails", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/mock", "//pkg/util/pdapi", "//pkg/util/sem", diff --git a/pkg/infoschema/OWNERS b/pkg/infoschema/OWNERS new file mode 100644 index 0000000000000..d1d8285c8292e --- 
/dev/null +++ b/pkg/infoschema/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-infoschema diff --git a/pkg/infoschema/builder.go b/pkg/infoschema/builder.go index 3b030b5b1ad25..54f8d8a85dc58 100644 --- a/pkg/infoschema/builder.go +++ b/pkg/infoschema/builder.go @@ -35,7 +35,6 @@ import ( "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/util/domainutil" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) @@ -420,9 +419,9 @@ func updateAutoIDForExchangePartition(store kv.Storage, ptSchemaID, ptID, ntSche // Set both tables to the maximum auto IDs between normal table and partitioned table. newAutoIDs := meta.AutoIDGroup{ - RowID: mathutil.Max(ptAutoIDs.RowID, ntAutoIDs.RowID), - IncrementID: mathutil.Max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID), - RandomID: mathutil.Max(ptAutoIDs.RandomID, ntAutoIDs.RandomID), + RowID: max(ptAutoIDs.RowID, ntAutoIDs.RowID), + IncrementID: max(ptAutoIDs.IncrementID, ntAutoIDs.IncrementID), + RandomID: max(ptAutoIDs.RandomID, ntAutoIDs.RandomID), } err = t.GetAutoIDAccessors(ptSchemaID, ptID).Put(newAutoIDs) if err != nil { diff --git a/pkg/infoschema/perfschema/BUILD.bazel b/pkg/infoschema/perfschema/BUILD.bazel index ef3a6d0fa45a8..8291fbcab1bc9 100644 --- a/pkg/infoschema/perfschema/BUILD.bazel +++ b/pkg/infoschema/perfschema/BUILD.bazel @@ -25,6 +25,7 @@ go_library( "//pkg/table/tables", "//pkg/types", "//pkg/util", + "//pkg/util/pdapi", "//pkg/util/profile", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", @@ -49,6 +50,7 @@ go_test( "//pkg/store/mockstore", "//pkg/testkit", "//pkg/testkit/testsetup", + "//pkg/util/pdapi", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@io_opencensus_go//stats/view", diff --git a/pkg/infoschema/perfschema/tables.go b/pkg/infoschema/perfschema/tables.go index 3cab9bf16847f..efc5aefe0f297 100644 --- a/pkg/infoschema/perfschema/tables.go +++ b/pkg/infoschema/perfschema/tables.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/table/tables" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/pingcap/tidb/pkg/util/profile" ) @@ -241,18 +242,17 @@ func (vt *perfSchemaTable) getRows(ctx context.Context, sctx sessionctx.Context, interval := fmt.Sprintf("%d", profile.CPUProfileInterval/time.Second) fullRows, err = dataForRemoteProfile(sctx, "tikv", "/debug/pprof/profile?seconds="+interval, false) case tableNamePDProfileCPU: - interval := fmt.Sprintf("%d", profile.CPUProfileInterval/time.Second) - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/profile?seconds="+interval, false) + fullRows, err = dataForRemoteProfile(sctx, "pd", pdapi.PProfProfileAPIWithInterval(profile.CPUProfileInterval), false) case tableNamePDProfileMemory: - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/heap", false) + fullRows, err = dataForRemoteProfile(sctx, "pd", pdapi.PProfHeap, false) case tableNamePDProfileMutex: - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/mutex", false) + fullRows, err = dataForRemoteProfile(sctx, "pd", pdapi.PProfMutex, false) case tableNamePDProfileAllocs: - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/allocs", false) + fullRows, err = 
dataForRemoteProfile(sctx, "pd", pdapi.PProfAllocs, false) case tableNamePDProfileBlock: - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/block", false) + fullRows, err = dataForRemoteProfile(sctx, "pd", pdapi.PProfBlock, false) case tableNamePDProfileGoroutines: - fullRows, err = dataForRemoteProfile(sctx, "pd", "/pd/api/v1/debug/pprof/goroutine?debug=2", true) + fullRows, err = dataForRemoteProfile(sctx, "pd", pdapi.PProfGoroutineWithDebugLevel(2), true) case tableNameSessionVariables: fullRows, err = infoschema.GetDataFromSessionVariables(ctx, sctx) case tableNameSessionConnectAttrs: diff --git a/pkg/infoschema/perfschema/tables_test.go b/pkg/infoschema/perfschema/tables_test.go index 2a15fe9f82c41..b68fcd525aa18 100644 --- a/pkg/infoschema/perfschema/tables_test.go +++ b/pkg/infoschema/perfschema/tables_test.go @@ -31,6 +31,7 @@ import ( "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/store/mockstore" "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/stretchr/testify/require" "go.opencensus.io/stats/view" ) @@ -151,12 +152,12 @@ func TestTiKVProfileCPU(t *testing.T) { } // mock PD profile - router.HandleFunc("/pd/api/v1/debug/pprof/profile", copyHandler("testdata/test.pprof")) - router.HandleFunc("/pd/api/v1/debug/pprof/heap", handlerFactory("heap")) - router.HandleFunc("/pd/api/v1/debug/pprof/mutex", handlerFactory("mutex")) - router.HandleFunc("/pd/api/v1/debug/pprof/allocs", handlerFactory("allocs")) - router.HandleFunc("/pd/api/v1/debug/pprof/block", handlerFactory("block")) - router.HandleFunc("/pd/api/v1/debug/pprof/goroutine", handlerFactory("goroutine", 2)) + router.HandleFunc(pdapi.PProfProfile, copyHandler("testdata/test.pprof")) + router.HandleFunc(pdapi.PProfHeap, handlerFactory("heap")) + router.HandleFunc(pdapi.PProfMutex, handlerFactory("mutex")) + router.HandleFunc(pdapi.PProfAllocs, handlerFactory("allocs")) + router.HandleFunc(pdapi.PProfBlock, handlerFactory("block")) + router.HandleFunc(pdapi.PProfGoroutine, handlerFactory("goroutine", 2)) tk.MustQuery("select * from pd_profile_cpu where depth < 3") warnings = tk.Session().GetSessionVars().StmtCtx.GetWarnings() diff --git a/pkg/infoschema/tables.go b/pkg/infoschema/tables.go index 53704292118b0..3a7f48a79ecee 100644 --- a/pkg/infoschema/tables.go +++ b/pkg/infoschema/tables.go @@ -2395,7 +2395,7 @@ func FetchClusterServerInfoWithoutPrivilegeCheck(ctx context.Context, sctx sessi for i, srv := range serversInfo { address := srv.Address remote := address - if srv.ServerType == "tidb" { + if srv.ServerType == "tidb" || srv.ServerType == "tiproxy" { remote = srv.StatusAddr } wg.Add(1) diff --git a/pkg/infoschema/test/clustertablestest/BUILD.bazel b/pkg/infoschema/test/clustertablestest/BUILD.bazel index 8d393c6d7bd1e..3c6e587dbccaf 100644 --- a/pkg/infoschema/test/clustertablestest/BUILD.bazel +++ b/pkg/infoschema/test/clustertablestest/BUILD.bazel @@ -38,6 +38,7 @@ go_test( "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util", + "//pkg/util/dbterror/exeerrors", "//pkg/util/gctuner", "//pkg/util/memory", "//pkg/util/pdapi", diff --git a/pkg/infoschema/test/clustertablestest/cluster_tables_test.go b/pkg/infoschema/test/clustertablestest/cluster_tables_test.go index 00b8016a1d808..c741b42f7eb29 100644 --- a/pkg/infoschema/test/clustertablestest/cluster_tables_test.go +++ b/pkg/infoschema/test/clustertablestest/cluster_tables_test.go @@ -49,7 +49,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit" 
"github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/pdapi" "github.com/pingcap/tidb/pkg/util/resourcegrouptag" "github.com/pingcap/tidb/pkg/util/set" @@ -717,7 +717,7 @@ select * from t1; err = tk.QueryToErr("select * from `information_schema`.`slow_query` where time > '2022-04-14 00:00:00' and time < '2022-04-15 00:00:00'") require.Error(t, err, quota) - require.Contains(t, err.Error(), memory.PanicMemoryExceedWarnMsg, quota) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } memQuotas := []int{128, 512, 1024, 2048, 4096} for _, quota := range memQuotas { diff --git a/pkg/lock/OWNERS b/pkg/lock/OWNERS new file mode 100644 index 0000000000000..a70e8d7189b99 --- /dev/null +++ b/pkg/lock/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-lock diff --git a/pkg/meta/OWNERS b/pkg/meta/OWNERS new file mode 100644 index 0000000000000..e7d7e1fb58536 --- /dev/null +++ b/pkg/meta/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-meta diff --git a/pkg/meta/autoid/BUILD.bazel b/pkg/meta/autoid/BUILD.bazel index 107f9f46cf768..523258d50b831 100644 --- a/pkg/meta/autoid/BUILD.bazel +++ b/pkg/meta/autoid/BUILD.bazel @@ -24,7 +24,6 @@ go_library( "//pkg/util/etcd", "//pkg/util/execdetails", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/tracing", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/meta/autoid/autoid.go b/pkg/meta/autoid/autoid.go index 0555868a52cb7..6a3d9f94bc27d 100644 --- a/pkg/meta/autoid/autoid.go +++ b/pkg/meta/autoid/autoid.go @@ -37,7 +37,6 @@ import ( "github.com/pingcap/tidb/pkg/util/etcd" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/tracing" "github.com/tikv/client-go/v2/txnkv/txnsnapshot" tikvutil "github.com/tikv/client-go/v2/util" @@ -353,8 +352,8 @@ func (alloc *allocator) rebase4Unsigned(ctx context.Context, requiredBase uint64 } uCurrentEnd := uint64(currentEnd) if allocIDs { - newBase = mathutil.Max(uCurrentEnd, requiredBase) - newEnd = mathutil.Min(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step) + newBase = max(uCurrentEnd, requiredBase) + newEnd = min(math.MaxUint64-uint64(alloc.step), newBase) + uint64(alloc.step) } else { if uCurrentEnd >= requiredBase { newBase = uCurrentEnd @@ -412,8 +411,8 @@ func (alloc *allocator) rebase4Signed(ctx context.Context, requiredBase int64, a return err1 } if allocIDs { - newBase = mathutil.Max(currentEnd, requiredBase) - newEnd = mathutil.Min(math.MaxInt64-alloc.step, newBase) + alloc.step + newBase = max(currentEnd, requiredBase) + newEnd = min(math.MaxInt64-alloc.step, newBase) + alloc.step } else { if currentEnd >= requiredBase { newBase = currentEnd @@ -872,7 +871,7 @@ func SeekToFirstAutoIDUnSigned(base, increment, offset uint64) uint64 { return nr } -func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, offset int64) (min int64, max int64, err error) { +func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, offset int64) (mini int64, max int64, err error) { // Check offset rebase if necessary. 
if offset-1 > alloc.base { if err := alloc.rebase4Signed(ctx, offset-1, true); err != nil { @@ -926,7 +925,7 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o if nextStep < n1 { nextStep = n1 } - tmpStep := mathutil.Min(math.MaxInt64-newBase, nextStep) + tmpStep := min(math.MaxInt64-newBase, nextStep) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -953,12 +952,12 @@ func (alloc *allocator) alloc4Signed(ctx context.Context, n uint64, increment, o zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", alloc.tbID), zap.Int64("database ID", alloc.dbID)) - min = alloc.base + mini = alloc.base alloc.base += n1 - return min, alloc.base, nil + return mini, alloc.base, nil } -func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, offset int64) (min int64, max int64, err error) { +func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, offset int64) (mini int64, max int64, err error) { // Check offset rebase if necessary. if uint64(offset-1) > uint64(alloc.base) { if err := alloc.rebase4Unsigned(ctx, uint64(offset-1), true); err != nil { @@ -1017,7 +1016,7 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, if nextStep < n1 { nextStep = n1 } - tmpStep := int64(mathutil.Min(math.MaxUint64-uint64(newBase), uint64(nextStep))) + tmpStep := int64(min(math.MaxUint64-uint64(newBase), uint64(nextStep))) // The global rest is not enough for alloc. if tmpStep < n1 { return ErrAutoincReadFailed @@ -1044,10 +1043,10 @@ func (alloc *allocator) alloc4Unsigned(ctx context.Context, n uint64, increment, zap.Uint64("to ID", uint64(alloc.base+n1)), zap.Int64("table ID", alloc.tbID), zap.Int64("database ID", alloc.dbID)) - min = alloc.base + mini = alloc.base // Use uint64 n directly. 
alloc.base = int64(uint64(alloc.base) + uint64(n1)) - return min, alloc.base, nil + return mini, alloc.base, nil } func getAllocatorStatsFromCtx(ctx context.Context) (context.Context, *AllocatorRuntimeStats, **tikvutil.CommitDetails) { diff --git a/pkg/metrics/BUILD.bazel b/pkg/metrics/BUILD.bazel index d302f1bbd7ea4..135275718bd93 100644 --- a/pkg/metrics/BUILD.bazel +++ b/pkg/metrics/BUILD.bazel @@ -35,7 +35,6 @@ go_library( "//pkg/parser/terror", "//pkg/timer/metrics", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/promutil", "@com_github_pingcap_errors//:errors", "@com_github_prometheus_client_golang//prometheus", diff --git a/pkg/metrics/telemetry.go b/pkg/metrics/telemetry.go index 0cb6439053eec..ea07b9b83e520 100644 --- a/pkg/metrics/telemetry.go +++ b/pkg/metrics/telemetry.go @@ -15,7 +15,6 @@ package metrics import ( - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" ) @@ -402,7 +401,7 @@ func (c TablePartitionUsageCounter) Cal(rhs TablePartitionUsageCounter) TablePar TablePartitionRangeColumnsGt2Cnt: c.TablePartitionRangeColumnsGt2Cnt - rhs.TablePartitionRangeColumnsGt2Cnt, TablePartitionRangeColumnsGt3Cnt: c.TablePartitionRangeColumnsGt3Cnt - rhs.TablePartitionRangeColumnsGt3Cnt, TablePartitionListColumnsCnt: c.TablePartitionListColumnsCnt - rhs.TablePartitionListColumnsCnt, - TablePartitionMaxPartitionsCnt: mathutil.Max(c.TablePartitionMaxPartitionsCnt-rhs.TablePartitionMaxPartitionsCnt, rhs.TablePartitionMaxPartitionsCnt), + TablePartitionMaxPartitionsCnt: max(c.TablePartitionMaxPartitionsCnt-rhs.TablePartitionMaxPartitionsCnt, rhs.TablePartitionMaxPartitionsCnt), TablePartitionCreateIntervalPartitionsCnt: c.TablePartitionCreateIntervalPartitionsCnt - rhs.TablePartitionCreateIntervalPartitionsCnt, TablePartitionAddIntervalPartitionsCnt: c.TablePartitionAddIntervalPartitionsCnt - rhs.TablePartitionAddIntervalPartitionsCnt, TablePartitionDropIntervalPartitionsCnt: c.TablePartitionDropIntervalPartitionsCnt - rhs.TablePartitionDropIntervalPartitionsCnt, @@ -423,7 +422,7 @@ func ResetTablePartitionCounter(pre TablePartitionUsageCounter) TablePartitionUs TablePartitionRangeColumnsGt2Cnt: readCounter(TelemetryTablePartitionRangeColumnsGt2Cnt), TablePartitionRangeColumnsGt3Cnt: readCounter(TelemetryTablePartitionRangeColumnsGt3Cnt), TablePartitionListColumnsCnt: readCounter(TelemetryTablePartitionListColumnsCnt), - TablePartitionMaxPartitionsCnt: mathutil.Max(readCounter(TelemetryTablePartitionMaxPartitionsCnt)-pre.TablePartitionMaxPartitionsCnt, pre.TablePartitionMaxPartitionsCnt), + TablePartitionMaxPartitionsCnt: max(readCounter(TelemetryTablePartitionMaxPartitionsCnt)-pre.TablePartitionMaxPartitionsCnt, pre.TablePartitionMaxPartitionsCnt), TablePartitionReorganizePartitionCnt: readCounter(TelemetryReorganizePartitionCnt), } } diff --git a/pkg/owner/OWNERS b/pkg/owner/OWNERS new file mode 100644 index 0000000000000..41cf0d11840e9 --- /dev/null +++ b/pkg/owner/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-owner diff --git a/pkg/owner/manager.go b/pkg/owner/manager.go index e88c1c9398df1..b20051c1acf38 100644 --- a/pkg/owner/manager.go +++ b/pkg/owner/manager.go @@ -76,20 +76,25 @@ type OpType byte // List operation of types. const ( - OpNone OpType = 0 - OpGetUpgradingState OpType = 1 + OpNone OpType = 0 + OpSyncUpgradingState OpType = 1 ) // String implements fmt.Stringer interface. 
func (ot OpType) String() string { switch ot { - case OpGetUpgradingState: - return "get upgrading state" + case OpSyncUpgradingState: + return "sync upgrading state" default: return "none" } } +// IsSyncedUpgradingState represents whether the upgrading state is synchronized. +func (ot OpType) IsSyncedUpgradingState() bool { + return ot == OpSyncUpgradingState +} + // DDLOwnerChecker is used to check whether tidb is owner. type DDLOwnerChecker interface { // IsOwner returns whether the ownerManager is the owner. diff --git a/pkg/owner/manager_test.go b/pkg/owner/manager_test.go index 5bf2801578296..eff35dbd82108 100644 --- a/pkg/owner/manager_test.go +++ b/pkg/owner/manager_test.go @@ -139,17 +139,20 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { op, err := owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") require.NoError(t, err) require.Equal(t, op, owner.OpNone) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + require.False(t, op.IsSyncedUpgradingState()) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) + require.True(t, op.IsSyncedUpgradingState()) // update the same as the original value - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), tInfo.client, DDLOwnerKey, "log prefix") require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) + require.True(t, op.IsSyncedUpgradingState()) // test del owner key when SetOwnerOpValue require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey", `return("delOwnerKeyAndNotOwner")`)) err = manager.SetOwnerOpValue(context.Background(), owner.OpNone) @@ -158,6 +161,7 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { require.NotNil(t, err) require.Equal(t, concurrency.ErrElectionNoLeader.Error(), err.Error()) require.Equal(t, op, owner.OpNone) + require.False(t, op.IsSyncedUpgradingState()) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey")) // Let ddl run for the owner again. @@ -167,7 +171,7 @@ func TestSetAndGetOwnerOpValue(t *testing.T) { // Mock the manager become not owner because the owner is deleted(like TTL is timeout). // And then the manager campaigns the owner again, and become the owner. 
require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/owner/MockDelOwnerKey", `return("onlyDelOwnerKey")`)) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.Error(t, err, "put owner key failed, cmp is false") isOwner = checkOwner(tInfo.ddl, true) require.True(t, isOwner) @@ -199,11 +203,11 @@ func TestGetOwnerOpValueBeforeSet(t *testing.T) { require.NoError(t, err) require.Equal(t, op, owner.OpNone) require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/owner/MockNotSetOwnerOp")) - err = manager.SetOwnerOpValue(context.Background(), owner.OpGetUpgradingState) + err = manager.SetOwnerOpValue(context.Background(), owner.OpSyncUpgradingState) require.NoError(t, err) op, err = owner.GetOwnerOpValue(context.Background(), nil, DDLOwnerKey, "log prefix") require.NoError(t, err) - require.Equal(t, op, owner.OpGetUpgradingState) + require.Equal(t, op, owner.OpSyncUpgradingState) } func TestCluster(t *testing.T) { diff --git a/pkg/parser/ast/ddl.go b/pkg/parser/ast/ddl.go index 8d67dbc51e34e..2ed5135215208 100644 --- a/pkg/parser/ast/ddl.go +++ b/pkg/parser/ast/ddl.go @@ -4381,16 +4381,16 @@ type RecoverTableStmt struct { // Restore implements Node interface. func (n *RecoverTableStmt) Restore(ctx *format.RestoreCtx) error { ctx.WriteKeyWord("RECOVER TABLE ") - if n.JobID != 0 { - ctx.WriteKeyWord("BY JOB ") - ctx.WritePlainf("%d", n.JobID) - } else { + if n.Table != nil { if err := n.Table.Restore(ctx); err != nil { return errors.Annotate(err, "An error occurred while splicing RecoverTableStmt Table") } if n.JobNum > 0 { ctx.WritePlainf(" %d", n.JobNum) } + } else { + ctx.WriteKeyWord("BY JOB ") + ctx.WritePlainf("%d", n.JobID) } return nil } diff --git a/pkg/parser/model/reorg.go b/pkg/parser/model/reorg.go index 927f842e20f13..68a9f27a0d374 100644 --- a/pkg/parser/model/reorg.go +++ b/pkg/parser/model/reorg.go @@ -31,8 +31,17 @@ type DDLReorgMeta struct { IsDistReorg bool `json:"is_dist_reorg"` UseCloudStorage bool `json:"use_cloud_storage"` ResourceGroupName string `json:"resource_group_name"` + Version int64 `json:"version"` } +const ( + // ReorgMetaVersion0 is the minimum version of DDLReorgMeta. + ReorgMetaVersion0 = int64(0) + // CurrentReorgMetaVersion is the current version of DDLReorgMeta. + // To fix #46306 (whether the end key is included in the table range), the version is bumped to 1. + CurrentReorgMetaVersion = int64(1) +) + // ReorgType indicates which process is used for the data reorganization. 
type ReorgType int8 diff --git a/pkg/parser/parser_test.go b/pkg/parser/parser_test.go index f9b8fb8681096..4c5e5ee39f461 100644 --- a/pkg/parser/parser_test.go +++ b/pkg/parser/parser_test.go @@ -3373,6 +3373,7 @@ func TestDDL(t *testing.T) { {"recover table by job 11", true, "RECOVER TABLE BY JOB 11"}, {"recover table by job 11,12,13", false, ""}, {"recover table by job", false, ""}, + {"recover table by job 0", true, "RECOVER TABLE BY JOB 0"}, {"recover table t1", true, "RECOVER TABLE `t1`"}, {"recover table t1,t2", false, ""}, {"recover table ", false, ""}, diff --git a/pkg/planner/cardinality/pseudo.go b/pkg/planner/cardinality/pseudo.go index bd0d62a4b7d7b..7a8dba62a17fc 100644 --- a/pkg/planner/cardinality/pseudo.go +++ b/pkg/planner/cardinality/pseudo.go @@ -217,7 +217,7 @@ func getPseudoRowCountByColumnRanges(sc *stmtctx.StatementContext, tableRowCount } else if ran.HighVal[colIdx].Kind() == types.KindMaxValue { rowCount += tableRowCount / pseudoLessRate } else { - compare, err := ran.LowVal[colIdx].Compare(sc, &ran.HighVal[colIdx], ran.Collators[colIdx]) + compare, err := ran.LowVal[colIdx].Compare(sc.TypeCtx(), &ran.HighVal[colIdx], ran.Collators[colIdx]) if err != nil { return 0, errors.Trace(err) } diff --git a/pkg/planner/cardinality/row_count_column.go b/pkg/planner/cardinality/row_count_column.go index f9f075dc818f6..ee8a6220655de 100644 --- a/pkg/planner/cardinality/row_count_column.go +++ b/pkg/planner/cardinality/row_count_column.go @@ -187,7 +187,7 @@ func GetColumnRowCount(sctx sessionctx.Context, c *statistics.Column, ranges []* if lowVal.Kind() == types.KindString { lowVal.SetBytes(collate.GetCollator(lowVal.Collation()).Key(lowVal.GetString())) } - cmp, err := lowVal.Compare(sc, &highVal, collate.GetBinaryCollator()) + cmp, err := lowVal.Compare(sc.TypeCtx(), &highVal, collate.GetBinaryCollator()) if err != nil { return 0, errors.Trace(err) } diff --git a/pkg/planner/cardinality/row_count_index.go b/pkg/planner/cardinality/row_count_index.go index 06ff7c9627cf3..eef562fd59a40 100644 --- a/pkg/planner/cardinality/row_count_index.go +++ b/pkg/planner/cardinality/row_count_index.go @@ -515,7 +515,7 @@ func betweenRowCountOnIndex(sctx sessionctx.Context, idx *statistics.Index, l, r func getOrdinalOfRangeCond(sc *stmtctx.StatementContext, ran *ranger.Range) int { for i := range ran.LowVal { a, b := ran.LowVal[i], ran.HighVal[i] - cmp, err := a.Compare(sc, &b, ran.Collators[0]) + cmp, err := a.Compare(sc.TypeCtx(), &b, ran.Collators[0]) if err != nil { return 0 } diff --git a/pkg/planner/cardinality/selectivity_test.go b/pkg/planner/cardinality/selectivity_test.go index 2706e1d89e6da..9c254512bc275 100644 --- a/pkg/planner/cardinality/selectivity_test.go +++ b/pkg/planner/cardinality/selectivity_test.go @@ -88,7 +88,7 @@ func BenchmarkSelectivity(b *testing.B) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoErrorf(b, err, "for %s", exprs) - p, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmts[0], ret.InfoSchema) require.NoErrorf(b, err, "error %v, for building plan, expr %s", err, exprs) file, err := os.Create("cpu.profile") @@ -446,7 +446,7 @@ func TestSelectivity(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) 
require.NoErrorf(t, err, "for expr %s", tt.exprs) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoErrorf(t, err, "for building plan, expr %s", err, tt.exprs) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) @@ -504,7 +504,7 @@ func TestDNFCondSelectivity(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoErrorf(t, err, "error %v, for sql %s", err, tt) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoErrorf(t, err, "error %v, for building plan, sql %s", err, tt) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) diff --git a/pkg/planner/cardinality/trace_test.go b/pkg/planner/cardinality/trace_test.go index 07b8cbbe8149c..186aae1a45202 100644 --- a/pkg/planner/cardinality/trace_test.go +++ b/pkg/planner/cardinality/trace_test.go @@ -205,7 +205,7 @@ func TestTraceDebugSelectivity(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmt, plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmt, ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(context.Background(), sctx, stmt, ret.InfoSchema) require.NoError(t, err) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) diff --git a/pkg/planner/cascades/optimize_test.go b/pkg/planner/cascades/optimize_test.go index 940cc2542727e..977405b4f2540 100644 --- a/pkg/planner/cascades/optimize_test.go +++ b/pkg/planner/cascades/optimize_test.go @@ -42,7 +42,7 @@ func TestImplGroupZeroCost(t *testing.T) { stmt, err := p.ParseOneStmt("select t1.a, t2.a from t as t1 left join t as t2 on t1.a = t2.a where t1.a < 1.0", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -69,7 +69,7 @@ func TestInitGroupSchema(t *testing.T) { stmt, err := p.ParseOneStmt("select a from t", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -94,7 +94,7 @@ func TestFillGroupStats(t *testing.T) { stmt, err := p.ParseOneStmt("select * from t t1 join t t2 on t1.a = t2.a", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -128,7 +128,7 @@ func TestPreparePossibleProperties(t *testing.T) { stmt, err := p.ParseOneStmt("select f, sum(a) from t group by f", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) 
require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -225,7 +225,7 @@ func TestAppliedRuleSet(t *testing.T) { stmt, err := p.ParseOneStmt("select 1", "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/cascades/stringer_test.go b/pkg/planner/cascades/stringer_test.go index 1356984d54e92..14359e6de8cf1 100644 --- a/pkg/planner/cascades/stringer_test.go +++ b/pkg/planner/cascades/stringer_test.go @@ -61,7 +61,7 @@ func TestGroupStringer(t *testing.T) { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/cascades/transformation_rules_test.go b/pkg/planner/cascades/transformation_rules_test.go index a6669629ab25d..44af39a46d000 100644 --- a/pkg/planner/cascades/transformation_rules_test.go +++ b/pkg/planner/cascades/transformation_rules_test.go @@ -44,7 +44,7 @@ func testGroupToString(t *testing.T, input []string, output []struct { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) @@ -96,7 +96,7 @@ func TestAggPushDownGather(t *testing.T) { stmt, err := p.ParseOneStmt(sql, "", "") require.NoError(t, err) - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt, is) require.NoError(t, err) logic, ok := plan.(plannercore.LogicalPlan) diff --git a/pkg/planner/core/BUILD.bazel b/pkg/planner/core/BUILD.bazel index 8a42db25ac49f..4fdc27e94063b 100644 --- a/pkg/planner/core/BUILD.bazel +++ b/pkg/planner/core/BUILD.bazel @@ -44,6 +44,7 @@ go_library( "point_get_plan.go", "preprocess.go", "property_cols_prune.go", + "recheck_cte.go", "resolve_indices.go", "rule_aggregation_elimination.go", "rule_aggregation_push_down.go", diff --git a/pkg/planner/core/casetest/BUILD.bazel b/pkg/planner/core/casetest/BUILD.bazel index 1f88dc86084d3..037ae24961c47 100644 --- a/pkg/planner/core/casetest/BUILD.bazel +++ b/pkg/planner/core/casetest/BUILD.bazel @@ -12,7 +12,7 @@ go_test( ], data = glob(["testdata/**"]), flaky = True, - shard_count = 19, + shard_count = 21, deps = [ "//pkg/domain", "//pkg/parser", diff --git a/pkg/planner/core/casetest/plan_test.go b/pkg/planner/core/casetest/plan_test.go index 9b2c386a8641d..ce5ef805f7d11 100644 --- a/pkg/planner/core/casetest/plan_test.go +++ b/pkg/planner/core/casetest/plan_test.go @@ -154,6 +154,81 @@ func TestNormalizedPlan(t *testing.T) { } } +func TestPlanDigest4InList(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t") + tk.MustExec("create table t (a int);") + tk.MustExec("set global tidb_ignore_inlist_plan_digest=true;") + tk.Session().GetSessionVars().PlanID.Store(0) + queriesGroup1 := []string{ + "select * from t where a in (1, 2);", + "select a 
in (1, 2) from t;", + } + queriesGroup2 := []string{ + "select * from t where a in (1, 2, 3);", + "select a in (1, 2, 3) from t;", + } + for i := 0; i < len(queriesGroup1); i++ { + query1 := queriesGroup1[i] + query2 := queriesGroup2[i] + t.Run(query1+" vs "+query2, func(t *testing.T) { + tk.MustExec(query1) + info1 := tk.Session().ShowProcess() + require.NotNil(t, info1) + p1, ok := info1.Plan.(core.Plan) + require.True(t, ok) + _, digest1 := core.NormalizePlan(p1) + tk.MustExec(query2) + info2 := tk.Session().ShowProcess() + require.NotNil(t, info2) + p2, ok := info2.Plan.(core.Plan) + require.True(t, ok) + _, digest2 := core.NormalizePlan(p2) + require.Equal(t, digest1, digest2) + }) + } +} + +func TestIssue47634(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec("drop table if exists t3,t4") + tk.MustExec("create table t3(a int, b int, c int);") + tk.MustExec("create table t4(a int, b int, c int, primary key (a, b) clustered);") + tk.MustExec("create table t5(a int, b int, c int, key idx_a_b (a, b));") + tk.Session().GetSessionVars().PlanID.Store(0) + queriesGroup1 := []string{ + "explain select /*+ inl_join(t4) */ * from t3 join t4 on t3.b = t4.b where t4.a = 1;", + "explain select /*+ inl_join(t5) */ * from t3 join t5 on t3.b = t5.b where t5.a = 1;", + } + queriesGroup2 := []string{ + "explain select /*+ inl_join(t4) */ * from t3 join t4 on t3.b = t4.b where t4.a = 2;", + "explain select /*+ inl_join(t5) */ * from t3 join t5 on t3.b = t5.b where t5.a = 2;", + } + for i := 0; i < len(queriesGroup1); i++ { + query1 := queriesGroup1[i] + query2 := queriesGroup2[i] + t.Run(query1+" vs "+query2, func(t *testing.T) { + tk.MustExec(query1) + info1 := tk.Session().ShowProcess() + require.NotNil(t, info1) + p1, ok := info1.Plan.(core.Plan) + require.True(t, ok) + _, digest1 := core.NormalizePlan(p1) + tk.MustExec(query2) + info2 := tk.Session().ShowProcess() + require.NotNil(t, info2) + p2, ok := info2.Plan.(core.Plan) + require.True(t, ok) + _, digest2 := core.NormalizePlan(p2) + require.Equal(t, digest1, digest2) + }) + } +} + func TestNormalizedPlanForDiffStore(t *testing.T) { store, dom := testkit.CreateMockStoreAndDomain(t) tk := testkit.NewTestKit(t, store) diff --git a/pkg/planner/core/casetest/testdata/plan_normalized_suite_out.json b/pkg/planner/core/casetest/testdata/plan_normalized_suite_out.json index c7435e182fcf5..5d6a70fdd8373 100644 --- a/pkg/planner/core/casetest/testdata/plan_normalized_suite_out.json +++ b/pkg/planner/core/casetest/testdata/plan_normalized_suite_out.json @@ -95,7 +95,7 @@ " │ └─Selection cop gt(test.t1.c, ?)", " │ └─TableFullScan cop table:t1, range:[?,?], keep order:false", " └─TableReader root ", - " └─TableRangeScan cop table:t2, range: decided by [test.t1.a], keep order:false" + " └─TableRangeScan cop table:t2, keep order:false" ] }, { @@ -128,7 +128,7 @@ " │ └─Selection cop gt(test.t1.c, ?)", " │ └─TableFullScan cop table:t1, range:[?,?], keep order:false", " └─TableReader root ", - " └─TableRangeScan cop table:t2, range: decided by [test.t1.a], keep order:false" + " └─TableRangeScan cop table:t2, keep order:false" ] }, { diff --git a/pkg/planner/core/explain.go b/pkg/planner/core/explain.go index 1c3166b1cb71c..a0d7afa09f4b9 100644 --- a/pkg/planner/core/explain.go +++ b/pkg/planner/core/explain.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/planner/property" "github.com/pingcap/tidb/pkg/planner/util" + 
"github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/logutil" @@ -183,10 +184,11 @@ func (p *PhysicalTableScan) ExplainNormalizedInfo() string { func (p *PhysicalTableScan) OperatorInfo(normalized bool) string { var buffer strings.Builder if len(p.rangeInfo) > 0 { - // TODO: deal with normalized case - buffer.WriteString("range: decided by ") - buffer.WriteString(p.rangeInfo) - buffer.WriteString(", ") + if !normalized { + buffer.WriteString("range: decided by ") + buffer.WriteString(p.rangeInfo) + buffer.WriteString(", ") + } } else if p.haveCorCol() { if normalized { buffer.WriteString("range: decided by ") @@ -364,6 +366,9 @@ func (p *PhysicalSelection) ExplainInfo() string { // ExplainNormalizedInfo implements Plan interface. func (p *PhysicalSelection) ExplainNormalizedInfo() string { + if variable.IgnoreInlistPlanDigest.Load() { + return string(expression.SortedExplainExpressionListIgnoreInlist(p.Conditions)) + } return string(expression.SortedExplainNormalizedExpressionList(p.Conditions)) } @@ -402,6 +407,9 @@ func (p *PhysicalExpand) explainInfoV2() string { // ExplainNormalizedInfo implements Plan interface. func (p *PhysicalProjection) ExplainNormalizedInfo() string { + if variable.IgnoreInlistPlanDigest.Load() { + return string(expression.SortedExplainExpressionListIgnoreInlist(p.Exprs)) + } return string(expression.SortedExplainNormalizedExpressionList(p.Exprs)) } diff --git a/pkg/planner/core/handle_cols.go b/pkg/planner/core/handle_cols.go index c745e3d1fb4c3..13c07443501f9 100644 --- a/pkg/planner/core/handle_cols.go +++ b/pkg/planner/core/handle_cols.go @@ -174,7 +174,7 @@ func (cb *CommonHandleCols) Compare(a, b []types.Datum, ctors []collate.Collator for i, col := range cb.columns { aDatum := &a[col.Index] bDatum := &b[col.Index] - cmp, err := aDatum.Compare(cb.sc, bDatum, ctors[i]) + cmp, err := aDatum.Compare(cb.sc.TypeCtx(), bDatum, ctors[i]) if err != nil { return 0, err } @@ -288,7 +288,7 @@ func (*IntHandleCols) NumCols() int { func (ib *IntHandleCols) Compare(a, b []types.Datum, ctors []collate.Collator) (int, error) { aVal := &a[ib.col.Index] bVal := &b[ib.col.Index] - return aVal.Compare(nil, bVal, ctors[ib.col.Index]) + return aVal.Compare(types.DefaultStmtNoWarningContext, bVal, ctors[ib.col.Index]) } // GetFieldsTypes implements the kv.HandleCols interface. 
diff --git a/pkg/planner/core/integration_test.go b/pkg/planner/core/integration_test.go index 1a6688649ecea..2812f778e25d9 100644 --- a/pkg/planner/core/integration_test.go +++ b/pkg/planner/core/integration_test.go @@ -2529,6 +2529,15 @@ func TestIssue46298(t *testing.T) { tk.MustQuery("select *, first_value(v) over (partition by p order by o range between 3.1 preceding and 2.9 following) as a from test.first_range;") } +func TestIssue45044(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec(`use test`) + tk.MustExec(`set tidb_enable_ordered_result_mode = on`) + tk.MustExec(`create table t1(c1 int)`) + tk.MustQuery(`select * from t1 group by t1.c1 having count(1) > 1 order by count(1) limit 10`).Check(testkit.Rows()) // no error +} + // https://github.com/pingcap/tidb/issues/41458 func TestIssue41458(t *testing.T) { store := testkit.CreateMockStore(t) diff --git a/pkg/planner/core/logical_plan_builder.go b/pkg/planner/core/logical_plan_builder.go index ae77f237c13ae..3072594750d92 100644 --- a/pkg/planner/core/logical_plan_builder.go +++ b/pkg/planner/core/logical_plan_builder.go @@ -4858,13 +4858,21 @@ func (b *PlanBuilder) tryBuildCTE(ctx context.Context, tn *ast.TableName, asName } if cte.cteClass == nil { - cte.cteClass = &CTEClass{IsDistinct: cte.isDistinct, seedPartLogicalPlan: cte.seedLP, - recursivePartLogicalPlan: cte.recurLP, IDForStorage: cte.storageID, - optFlag: cte.optFlag, HasLimit: hasLimit, LimitBeg: limitBeg, - LimitEnd: limitEnd, pushDownPredicates: make([]expression.Expression, 0), ColumnMap: make(map[string]*expression.Column)} + cte.cteClass = &CTEClass{ + IsDistinct: cte.isDistinct, + seedPartLogicalPlan: cte.seedLP, + recursivePartLogicalPlan: cte.recurLP, + IDForStorage: cte.storageID, + optFlag: cte.optFlag, + HasLimit: hasLimit, + LimitBeg: limitBeg, + LimitEnd: limitEnd, + pushDownPredicates: make([]expression.Expression, 0), + ColumnMap: make(map[string]*expression.Column), + } } var p LogicalPlan - lp := LogicalCTE{cteAsName: tn.Name, cteName: tn.Name, cte: cte.cteClass, seedStat: cte.seedStat, isOuterMostCTE: !b.buildingCTE}.Init(b.ctx, b.getSelectOffset()) + lp := LogicalCTE{cteAsName: tn.Name, cteName: tn.Name, cte: cte.cteClass, seedStat: cte.seedStat}.Init(b.ctx, b.getSelectOffset()) prevSchema := cte.seedLP.Schema().Clone() lp.SetSchema(getResultCTESchema(cte.seedLP.Schema(), b.ctx.GetSessionVars())) diff --git a/pkg/planner/core/logical_plans.go b/pkg/planner/core/logical_plans.go index 1765f3bbbb758..4ac2900bb77f9 100644 --- a/pkg/planner/core/logical_plans.go +++ b/pkg/planner/core/logical_plans.go @@ -2339,6 +2339,7 @@ type CTEClass struct { // pushDownPredicates may be push-downed by different references. 
pushDownPredicates []expression.Expression ColumnMap map[string]*expression.Column + isOuterMostCTE bool } const emptyCTEClassSize = int64(unsafe.Sizeof(CTEClass{})) @@ -2370,11 +2371,10 @@ func (cc *CTEClass) MemoryUsage() (sum int64) { type LogicalCTE struct { logicalSchemaProducer - cte *CTEClass - cteAsName model.CIStr - cteName model.CIStr - seedStat *property.StatsInfo - isOuterMostCTE bool + cte *CTEClass + cteAsName model.CIStr + cteName model.CIStr + seedStat *property.StatsInfo onlyUsedAsStorage bool } diff --git a/pkg/planner/core/logical_plans_test.go b/pkg/planner/core/logical_plans_test.go index 82283419f6012..87c6ca25ea925 100644 --- a/pkg/planner/core/logical_plans_test.go +++ b/pkg/planner/core/logical_plans_test.go @@ -115,7 +115,7 @@ func TestPredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -135,7 +135,7 @@ func TestImplicitCastNotNullFlag(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder|flagPrunColumns|flagEliminateProjection, p.(LogicalPlan)) require.NoError(t, err) @@ -153,7 +153,7 @@ func TestEliminateProjectionUnderUnion(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder|flagPrunColumns|flagEliminateProjection, p.(LogicalPlan)) require.NoError(t, err) @@ -180,7 +180,7 @@ func TestJoinPredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -220,7 +220,7 @@ func TestOuterWherePredicatePushDown(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -266,7 +266,7 @@ func TestSimplifyOuterJoin(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), 
flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -307,7 +307,7 @@ func TestAntiSemiJoinConstFalse(t *testing.T) { comment := fmt.Sprintf("for %s", ca.sql) stmt, err := s.p.ParseOneStmt(ca.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagDecorrelate|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -335,7 +335,7 @@ func TestDeriveNotNullConds(t *testing.T) { comment := fmt.Sprintf("for %s", ca) stmt, err := s.p.ParseOneStmt(ca, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagDecorrelate, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -364,7 +364,7 @@ func TestExtraPKNotNullFlag(t *testing.T) { comment := fmt.Sprintf("for %s", sql) stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) ds := p.(*LogicalProjection).children[0].(*LogicalAggregation).children[0].(*DataSource) require.Equal(t, "_tidb_rowid", ds.Columns[2].Name.L) @@ -385,7 +385,7 @@ func buildLogicPlan4GroupBy(s *plannerSuite, t *testing.T, sql string) (Plan, er stmt.(*ast.SelectStmt).From.TableRefs.Left.(*ast.TableSource).Source.(*ast.TableName).TableInfo = mockedTableInfo - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) return p, err } @@ -447,7 +447,7 @@ func TestDupRandJoinCondsPushDown(t *testing.T) { defer s.Close() stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -517,7 +517,7 @@ func TestTablePartition(t *testing.T) { testdata.OnRecord(func() { }) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, isChoices[ca.IsIdx]) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, isChoices[ca.IsIdx]) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain|flagPredicatePushDown|flagPartitionProcessor, p.(LogicalPlan)) require.NoError(t, err) @@ -543,7 +543,7 @@ func TestSubquery(t *testing.T) { err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) if lp, ok := p.(LogicalPlan); ok { p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagDecorrelate|flagPrunColumns|flagPrunColumnsAgain|flagSemiJoinRewrite, lp) @@ -572,7 +572,7 @@ func TestPlanBuilder(t *testing.T) { s.ctx.GetSessionVars().SetHashJoinConcurrency(1) err = Preprocess(context.Background(), 
s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) if lp, ok := p.(LogicalPlan); ok { p, err = logicalOptimize(context.TODO(), flagPrunColumns|flagPrunColumnsAgain, lp) @@ -597,7 +597,7 @@ func TestJoinReOrder(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagJoinReOrder, p.(LogicalPlan)) require.NoError(t, err) @@ -626,7 +626,7 @@ func TestEagerAggregation(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagPushDownAgg, p.(LogicalPlan)) require.NoError(t, err) @@ -652,7 +652,7 @@ func TestColumnPruning(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(ctx, flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -681,7 +681,7 @@ func TestSortByItemsPruning(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(ctx, flagEliminateProjection|flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err) @@ -711,7 +711,7 @@ func TestProjectionEliminator(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPrunColumns|flagPrunColumnsAgain|flagEliminateProjection, p.(LogicalPlan)) require.NoError(t, err) @@ -725,7 +725,7 @@ func TestCS3389(t *testing.T) { ctx := context.Background() stmt, err := s.p.ParseOneStmt("select count(*) from t where a in (select b from t2 where a is null);", "", "") require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagPrunColumns|flagPrunColumnsAgain|flagEliminateProjection|flagJoinReOrder, p.(LogicalPlan)) require.NoError(t, err) @@ -979,7 +979,7 @@ func TestValidate(t *testing.T) { require.NoError(t, err, comment) err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err) - _, _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) if tt.err == nil { require.NoError(t, err, comment) } else { @@ -1031,7 +1031,7 @@ func TestUniqueKeyInfo(t *testing.T) { stmt, err := 
s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) lp, err := logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagBuildKeyInfo, p.(LogicalPlan)) require.NoError(t, err) @@ -1054,7 +1054,7 @@ func TestAggPrune(t *testing.T) { stmt, err := s.p.ParseOneStmt(tt, "", "") require.NoError(t, err, comment) domain.GetDomain(s.ctx).MockInfoCacheAndLoadInfoSchema(s.is) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err) p, err = logicalOptimize(context.TODO(), flagPredicatePushDown|flagPrunColumns|flagPrunColumnsAgain|flagBuildKeyInfo|flagEliminateAgg|flagEliminateProjection, p.(LogicalPlan)) @@ -1666,7 +1666,7 @@ func TestNameResolver(t *testing.T) { require.NoError(t, err, comment) s.ctx.GetSessionVars().SetHashJoinConcurrency(1) - _, _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + _, err = BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) if test.err == "" { require.NoError(t, err) } else { @@ -2158,7 +2158,7 @@ func TestResolvingCorrelatedAggregate(t *testing.T) { require.NoError(t, err, comment) err = Preprocess(context.Background(), s.ctx, stmt, WithPreprocessorReturn(&PreprocessorReturn{InfoSchema: s.is})) require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) p, err = logicalOptimize(context.TODO(), flagBuildKeyInfo|flagEliminateProjection|flagPrunColumns|flagPrunColumnsAgain, p.(LogicalPlan)) require.NoError(t, err, comment) @@ -2241,7 +2241,7 @@ func TestWindowLogicalPlanAmbiguous(t *testing.T) { for i := 0; i < iterations; i++ { stmt, err := s.p.ParseOneStmt(sql, "", "") require.NoError(t, err) - p, _, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(context.Background(), s.ctx, stmt, s.is) require.NoError(t, err) if planString == "" { planString = ToString(p) @@ -2282,7 +2282,7 @@ func TestRemoveOrderbyInSubquery(t *testing.T) { comment := fmt.Sprintf("case:%v sql:%s", i, tt.sql) stmt, err := s.p.ParseOneStmt(tt.sql, "", "") require.NoError(t, err, comment) - p, _, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) + p, err := BuildLogicalPlanForTest(ctx, s.ctx, stmt, s.is) require.NoError(t, err, comment) require.Equal(t, tt.best, ToString(p), comment) } diff --git a/pkg/planner/core/memtable_predicate_extractor.go b/pkg/planner/core/memtable_predicate_extractor.go index fb33a510e7bdc..6ea343681928e 100644 --- a/pkg/planner/core/memtable_predicate_extractor.go +++ b/pkg/planner/core/memtable_predicate_extractor.go @@ -446,7 +446,7 @@ func (helper extractHelper) extractTimeRange( if colName == extractColName { timeType := types.NewFieldType(mysql.TypeDatetime) timeType.SetDecimal(6) - timeDatum, err := datums[0].ConvertTo(ctx.GetSessionVars().StmtCtx, timeType) + timeDatum, err := datums[0].ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), timeType) if err != nil || timeDatum.Kind() == types.KindNull { remained = append(remained, expr) continue diff --git a/pkg/planner/core/memtable_predicate_extractor_test.go b/pkg/planner/core/memtable_predicate_extractor_test.go index ea21f18572270..2c53fa8f2120b 100644 --- a/pkg/planner/core/memtable_predicate_extractor_test.go +++ 
b/pkg/planner/core/memtable_predicate_extractor_test.go @@ -1815,7 +1815,7 @@ func TestExtractorInPreparedStmt(t *testing.T) { prepared: "select * from information_schema.tidb_hot_regions_history where update_time>=?", userVars: []interface{}{"cast('2019-10-10 10:10:10' as datetime)"}, params: []interface{}{func() types.Time { - tt, err := types.ParseTimestamp(tk.Session().GetSessionVars().StmtCtx, "2019-10-10 10:10:10") + tt, err := types.ParseTimestamp(tk.Session().GetSessionVars().StmtCtx.TypeCtx(), "2019-10-10 10:10:10") require.NoError(t, err) return tt }()}, diff --git a/pkg/planner/core/optimizer.go b/pkg/planner/core/optimizer.go index 74786afb6b166..389aaebe8f313 100644 --- a/pkg/planner/core/optimizer.go +++ b/pkg/planner/core/optimizer.go @@ -172,15 +172,18 @@ type logicalOptRule interface { } // BuildLogicalPlanForTest builds a logical plan for testing purpose from ast.Node. -func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (Plan, types.NameSlice, error) { +func BuildLogicalPlanForTest(ctx context.Context, sctx sessionctx.Context, node ast.Node, infoSchema infoschema.InfoSchema) (Plan, error) { sctx.GetSessionVars().PlanID.Store(0) sctx.GetSessionVars().PlanColumnID.Store(0) builder, _ := NewPlanBuilder().Init(sctx, infoSchema, &utilhint.BlockHintProcessor{}) p, err := builder.Build(ctx, node) if err != nil { - return nil, nil, err + return nil, err + } + if logic, ok := p.(LogicalPlan); ok { + RecheckCTE(logic) } - return p, p.OutputNames(), err + return p, err } // CheckPrivilege checks the privilege for a user. diff --git a/pkg/planner/core/plan_cache.go b/pkg/planner/core/plan_cache.go index 9cad0384c919b..6b406788035af 100644 --- a/pkg/planner/core/plan_cache.go +++ b/pkg/planner/core/plan_cache.go @@ -494,7 +494,7 @@ func rebuildRange(p Plan) error { if err != nil { return err } - iv, err := dVal.ToInt64(sc) + iv, err := dVal.ToInt64(sc.TypeCtx()) if err != nil { return err } @@ -560,7 +560,7 @@ func rebuildRange(p Plan) error { if err != nil { return err } - iv, err := dVal.ToInt64(sc) + iv, err := dVal.ToInt64(sc.TypeCtx()) if err != nil { return err } @@ -619,12 +619,12 @@ func convertConstant2Datum(sc *stmtctx.StatementContext, con *expression.Constan if err != nil { return nil, err } - dVal, err := val.ConvertTo(sc, target) + dVal, err := val.ConvertTo(sc.TypeCtx(), target) if err != nil { return nil, err } // The converted result must be same as original datum. 
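// Note on the recurring API change in this patch: Datum conversion and
// comparison helpers now take the lightweight types.Context returned by
// StmtCtx.TypeCtx() instead of the whole *stmtctx.StatementContext. A minimal
// sketch of the convert-then-compare idiom that the hunks below migrate
// (variable names follow the surrounding code; the error value is illustrative):
//
//    dVal, err := d.ConvertTo(sc.TypeCtx(), &col.FieldType)
//    if err != nil {
//        return nil, err
//    }
//    cmp, err := dVal.Compare(sc.TypeCtx(), &d, collate.GetCollator(col.GetCollate()))
//    if err != nil || cmp != 0 {
//        return nil, errors.New("constant changed after conversion")
//    }
//
// Comparing the converted datum against the original rejects any lossy
// conversion, which keeps plan-cache rebuilding and the point-get fast path
// correct. The hunk below applies exactly this change to convertConstant2Datum: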
- cmp, err := dVal.Compare(sc, &val, collate.GetCollator(target.GetCollate())) + cmp, err := dVal.Compare(sc.TypeCtx(), &val, collate.GetCollator(target.GetCollate())) if err != nil || cmp != 0 { return nil, errors.New("Convert constant to datum failed, because the constant has changed after the conversion") } diff --git a/pkg/planner/core/plan_cache_test.go b/pkg/planner/core/plan_cache_test.go index 3a70630733020..2c45bc31f0d48 100644 --- a/pkg/planner/core/plan_cache_test.go +++ b/pkg/planner/core/plan_cache_test.go @@ -339,6 +339,95 @@ func TestIssue38533(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache").Check(testkit.Rows("0")) } +func TestPlanCacheGeneratedCols(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`set @@tidb_opt_fix_control = "45798:on"`) + tk.MustExec(`create table t1 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))))`) + tk.MustExec(`create table t2 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))) virtual)`) + tk.MustExec(`create table t3 (a int, info json, city varchar(64) as (JSON_UNQUOTE(JSON_EXTRACT(info, '$.city'))) stored)`) + tk.MustExec(`create table t4 (a int, info json, index zips( (CAST(info->'$.zipcode' AS UNSIGNED ARRAY))))`) + + tk.MustExec(`set @a=1`) + tk.MustExec(`set @b=2`) + + tk.MustExec(`prepare s1 from 'select * from t1 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t2 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t3 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache + + tk.MustExec(`prepare s1 from 'select * from t4 where a=?'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustQuery(`execute s1 using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute s1 using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache +} + +func TestPlanCacheGeneratedCols2(t *testing.T) { + store := testkit.CreateMockStore(t) + tk := testkit.NewTestKit(t, store) + tk.MustExec("use test") + tk.MustExec(`set @@tidb_opt_fix_control = "45798:on"`) + tk.MustExec(`CREATE TABLE t1 ( + ipk varbinary(255) NOT NULL, + i_id varchar(45) DEFAULT NULL, + i_set_id varchar(45) DEFAULT NULL, + p_id varchar(45) DEFAULT NULL, + p_set_id varchar(45) DEFAULT NULL, + m_id bigint(20) DEFAULT NULL, + m_i_id varchar(127) DEFAULT NULL, + m_i_set_id varchar(127) DEFAULT NULL, + d json DEFAULT NULL, + p_sources json DEFAULT NULL, + nslc json DEFAULT NULL, + cl json DEFAULT NULL, + fii json DEFAULT NULL, + fpi json DEFAULT NULL, + PRIMARY KEY (ipk) /*T![clustered_index] CLUSTERED */, + UNIQUE KEY i_id (i_id), + KEY d ((cast(d as char(253) array))), + KEY
m_i_id (m_i_id), + KEY m_i_set_id (m_i_set_id), + KEY fpi ((cast(fpi as unsigned array))), + KEY nslc ((cast(nslc as char(1000) array))), + KEY cl ((cast(cl as char(3000) array))), + KEY fii ((cast(fii as unsigned array))), + KEY m_id (m_id), + KEY i_set_id (i_set_id), + KEY m_i_and_m_id (m_i_id,m_id))`) + + tk.MustExec(`CREATE TABLE t2 ( + ipk varbinary(255) NOT NULL, + created_time bigint(20) DEFAULT NULL, + arrival_time bigint(20) DEFAULT NULL, + updated_time bigint(20) DEFAULT NULL, + timestamp_data json DEFAULT NULL, + PRIMARY KEY (ipk) /*T![clustered_index] CLUSTERED */)`) + + tk.MustExec(`prepare stmt from 'select * + from ( t1 left outer join t2 on ( t1 . ipk = t2 . ipk ) ) + where ( t1 . i_id = ? )'`) + tk.MustQuery(`show warnings`).Check(testkit.Rows()) // no warning + tk.MustExec(`set @a='a', @b='b'`) + tk.MustQuery(`execute stmt using @a`).Check(testkit.Rows()) + tk.MustQuery(`execute stmt using @b`).Check(testkit.Rows()) + tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows(`1`)) // hit cache +} + func TestInvalidRange(t *testing.T) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) diff --git a/pkg/planner/core/plan_cacheable_checker.go b/pkg/planner/core/plan_cacheable_checker.go index 717f16d97d50e..3ecfa73f93b25 100644 --- a/pkg/planner/core/plan_cacheable_checker.go +++ b/pkg/planner/core/plan_cacheable_checker.go @@ -580,7 +580,7 @@ func isPhysicalPlanCacheable(sctx sessionctx.Context, p PhysicalPlan, paramNum, case *PhysicalMemTable: return false, "PhysicalMemTable plan is un-cacheable" case *PhysicalIndexMergeReader: - if x.AccessMVIndex { + if x.AccessMVIndex && !enablePlanCacheForGeneratedCols(sctx) { return false, "the plan with IndexMerge accessing Multi-Valued Index is un-cacheable" } underIndexMerge = true @@ -622,6 +622,15 @@ func getMaxParamLimit(sctx sessionctx.Context) int { return v } +func enablePlanCacheForGeneratedCols(sctx sessionctx.Context) bool { + // disable this by default since it's not well tested. + // TODO: complete its test and enable it by default. + if sctx == nil || sctx.GetSessionVars() == nil || sctx.GetSessionVars().GetOptimizerFixControlMap() == nil { + return false + } + return fixcontrol.GetBoolWithDefault(sctx.GetSessionVars().GetOptimizerFixControlMap(), fixcontrol.Fix45798, false) +} + // checkTableCacheable checks whether a query accessing this table is cacheable. func checkTableCacheable(ctx context.Context, sctx sessionctx.Context, schema infoschema.InfoSchema, node *ast.TableName, isNonPrep bool) (cacheable bool, reason string) { tableSchema := node.Schema @@ -653,9 +662,12 @@ func checkTableCacheable(ctx context.Context, sctx sessionctx.Context, schema in */ return false, "query accesses partitioned tables is un-cacheable" } - for _, col := range tb.Cols() { - if col.IsGenerated() { - return false, "query accesses generated columns is un-cacheable" + + if !enablePlanCacheForGeneratedCols(sctx) { + for _, col := range tb.Cols() { + if col.IsGenerated() { + return false, "query accesses generated columns is un-cacheable" + } } } if tb.Meta().TempTableType != model.TempTableNone { diff --git a/pkg/planner/core/planbuilder.go b/pkg/planner/core/planbuilder.go index 700890377be4c..b216dbdc5eac0 100644 --- a/pkg/planner/core/planbuilder.go +++ b/pkg/planner/core/planbuilder.go @@ -3028,7 +3028,7 @@ func handleAnalyzeOptionsV2(opts []ast.AnalyzeOpt) (map[ast.AnalyzeOptionType]ui optMap[opt.Type] = v case ast.AnalyzeOptSampleRate: // Only Int/Float/decimal is accepted, so pass nil here is safe. 
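// The generated-column plan-cache support above is deliberately gated:
// enablePlanCacheForGeneratedCols consults optimizer fix control #45798 and
// defaults to off, so both the MV-index branch and the generated-column check
// in checkTableCacheable keep their old un-cacheable behavior unless a session
// opts in. A short usage sketch (statements taken from the tests above):
//
//    tk.MustExec(`set @@tidb_opt_fix_control = "45798:on"`)
//    tk.MustExec(`prepare s1 from 'select * from t1 where a=?'`)
//    tk.MustQuery(`execute s1 using @a`)
//    tk.MustQuery(`select @@last_plan_from_cache`) // "1" once the plan is reused
//
// With the flag off, checkTableCacheable still reports
// "query accesses generated columns is un-cacheable". The next hunk is a
// separate cleanup: analyze-option parsing switches to
// types.DefaultStmtNoWarningContext: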
- fVal, err := datumValue.ToFloat64(types.DefaultNoWarningContext) + fVal, err := datumValue.ToFloat64(types.DefaultStmtNoWarningContext) if err != nil { return nil, err } @@ -3091,7 +3091,7 @@ func handleAnalyzeOptions(opts []ast.AnalyzeOpt, statsVer int) (map[ast.AnalyzeO optMap[opt.Type] = v case ast.AnalyzeOptSampleRate: // Only Int/Float/decimal is accepted, so pass nil here is safe. - fVal, err := datumValue.ToFloat64(types.DefaultNoWarningContext) + fVal, err := datumValue.ToFloat64(types.DefaultStmtNoWarningContext) if err != nil { return nil, err } @@ -4676,7 +4676,7 @@ func (b *PlanBuilder) convertValue(valueItem ast.ExprNode, mockTablePlan Logical if err != nil { return d, err } - d, err = value.ConvertTo(b.ctx.GetSessionVars().StmtCtx, &col.FieldType) + d, err = value.ConvertTo(b.ctx.GetSessionVars().StmtCtx.TypeCtx(), &col.FieldType) if err != nil { if !types.ErrTruncated.Equal(err) && !types.ErrTruncatedWrongVal.Equal(err) && !types.ErrBadNumber.Equal(err) { return d, err @@ -5571,10 +5571,8 @@ func calcTSForPlanReplayer(sctx sessionctx.Context, tsExpr ast.ExprNode) uint64 tpLonglong.SetFlag(mysql.UnsignedFlag) // We need a strict check, which means no truncate or any other warnings/errors, or it will wrongly try to parse // a date/time string into a TSO. - // To achieve this, we need to set fields like StatementContext.IgnoreTruncate to false, and maybe it's better - // not to modify and reuse the original StatementContext, so we use a temporary one here. - tmpStmtCtx := stmtctx.NewStmtCtxWithTimeZone(sctx.GetSessionVars().Location()) - tso, err := tsVal.ConvertTo(tmpStmtCtx, tpLonglong) + // To achieve this, we create a new type context without re-using the one in statement context. + tso, err := tsVal.ConvertTo(types.DefaultStmtNoWarningContext.WithLocation(sctx.GetSessionVars().Location()), tpLonglong) if err == nil { return tso.GetUint64() } @@ -5583,7 +5581,7 @@ func calcTSForPlanReplayer(sctx sessionctx.Context, tsExpr ast.ExprNode) uint64 // this part is similar to CalculateAsOfTsExpr tpDateTime := types.NewFieldType(mysql.TypeDatetime) tpDateTime.SetDecimal(6) - timestamp, err := tsVal.ConvertTo(sctx.GetSessionVars().StmtCtx, tpDateTime) + timestamp, err := tsVal.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), tpDateTime) if err != nil { sctx.GetSessionVars().StmtCtx.AppendWarning(err) return 0 diff --git a/pkg/planner/core/point_get_plan.go b/pkg/planner/core/point_get_plan.go index 63403cfe5256f..68ab03f220ba6 100644 --- a/pkg/planner/core/point_get_plan.go +++ b/pkg/planner/core/point_get_plan.go @@ -1444,7 +1444,7 @@ func getNameValuePairs(ctx sessionctx.Context, tbl *model.TableInfo, tblName mod if !checkCanConvertInPointGet(col, d) { return nil, false } - dVal, err := d.ConvertTo(stmtCtx, &col.FieldType) + dVal, err := d.ConvertTo(stmtCtx.TypeCtx(), &col.FieldType) if err != nil { if terror.ErrorEqual(types.ErrOverflow, err) { return append(nvPairs, nameValuePair{colName: colName.Name.Name.L, colFieldType: &col.FieldType, value: d, con: con}), true @@ -1455,7 +1455,7 @@ func getNameValuePairs(ctx sessionctx.Context, tbl *model.TableInfo, tblName mod } } // The converted result must be same as original datum. 
- cmp, err := dVal.Compare(stmtCtx, &d, collate.GetCollator(col.GetCollate())) + cmp, err := dVal.Compare(stmtCtx.TypeCtx(), &d, collate.GetCollator(col.GetCollate())) if err != nil || cmp != 0 { return nil, false } @@ -1468,12 +1468,12 @@ func getPointGetValue(stmtCtx *stmtctx.StatementContext, col *model.ColumnInfo, if !checkCanConvertInPointGet(col, *d) { return nil } - dVal, err := d.ConvertTo(stmtCtx, &col.FieldType) + dVal, err := d.ConvertTo(stmtCtx.TypeCtx(), &col.FieldType) if err != nil { return nil } // The converted result must be same as original datum. - cmp, err := dVal.Compare(stmtCtx, d, collate.GetCollator(col.GetCollate())) + cmp, err := dVal.Compare(stmtCtx.TypeCtx(), d, collate.GetCollator(col.GetCollate())) if err != nil || cmp != 0 { return nil } diff --git a/pkg/planner/core/recheck_cte.go b/pkg/planner/core/recheck_cte.go new file mode 100644 index 0000000000000..87df6cd348f9f --- /dev/null +++ b/pkg/planner/core/recheck_cte.go @@ -0,0 +1,53 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package core + +import "github.com/pingcap/tidb/pkg/util/intset" + +// RecheckCTE fills the isOuterMostCTE field for CTEs. +// It's a temporary solution before we fully use the Sequence to optimize the CTEs. +// This func checks whether the CTE is referenced only by the main query. +func RecheckCTE(p LogicalPlan) { + visited := intset.NewFastIntSet() + findCTEs(p, &visited, true) +} + +func findCTEs( + p LogicalPlan, + visited *intset.FastIntSet, + isRootTree bool, +) { + if cteReader, ok := p.(*LogicalCTE); ok { + cte := cteReader.cte + if !isRootTree { + // Set it to false since it's referenced by other CTEs. + cte.isOuterMostCTE = false + } + if visited.Has(cte.IDForStorage) { + return + } + visited.Insert(cte.IDForStorage) + // Set it when we meet it for the first time. + cte.isOuterMostCTE = isRootTree + findCTEs(cte.seedPartLogicalPlan, visited, false) + if cte.recursivePartLogicalPlan != nil { + findCTEs(cte.recursivePartLogicalPlan, visited, false) + } + return + } + for _, child := range p.Children() { + findCTEs(child, visited, isRootTree) + } +} diff --git a/pkg/planner/core/rule_generate_column_substitute.go b/pkg/planner/core/rule_generate_column_substitute.go index 117551f5c5db3..f39c5aa5d0d55 100644 --- a/pkg/planner/core/rule_generate_column_substitute.go +++ b/pkg/planner/core/rule_generate_column_substitute.go @@ -60,6 +60,12 @@ func collectGenerateColumn(lp LogicalPlan, exprToColumn ExprColumnMap) { if !ok { return } + // Detect the read_from_storage(tiflash) hint: a virtual column blocks MPP task + // spreading (only the MPP table scan supports it), which would make the MPP + // plan lose the cost comparison against the TiKV index plan.
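// A hedged illustration of the hint this guard reacts to (the table name is
// illustrative, not from the diff):
//
//    select /*+ read_from_storage(tiflash[t]) */ sum(a) from t;
//
// When the data source prefers TiFlash, collecting generated columns for
// substitution could force a virtual-column scan that MPP cannot spread
// across tasks, so the rule bails out early: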
+ if ds.preferStoreType&preferTiFlash != 0 { + return + } for _, p := range ds.possibleAccessPaths { if p.IsTablePath() { continue diff --git a/pkg/planner/core/rule_generate_column_substitute_test.go b/pkg/planner/core/rule_generate_column_substitute_test.go index 385c8fb463f8f..bf6c322c684ca 100644 --- a/pkg/planner/core/rule_generate_column_substitute_test.go +++ b/pkg/planner/core/rule_generate_column_substitute_test.go @@ -97,7 +97,7 @@ import ( // . . 86: fmt.Println(sql) // . . 87: stmt, err := s.GetParser().ParseOneStmt(sql, "", "") // . . 88: require.NoError(b, err, sql) -// . 512.01kB 89: p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) +// . 512.01kB 89: p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) // . . 90: require.NoError(b, err) // . . 91: selection := p.(core.LogicalPlan).Children()[0] // . . 92: m := make(core.ExprColumnMap, len(selection.Schema().Columns)) @@ -187,7 +187,7 @@ import ( // . . 86: fmt.Println(sql) // . . 87: stmt, err := s.GetParser().ParseOneStmt(sql, "", "") // . . 88: require.NoError(b, err, sql) -// . 512.07kB 89: p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) +// . 512.07kB 89: p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) // . . 90: require.NoError(b, err) // . . 91: selection := p.(core.LogicalPlan).Children()[0] // . . 92: m := make(core.ExprColumnMap, len(selection.Schema().Columns)) @@ -262,7 +262,7 @@ func BenchmarkSubstituteExpression(b *testing.B) { fmt.Println(sql) stmt, err := s.GetParser().ParseOneStmt(sql, "", "") require.NoError(b, err, sql) - p, _, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) + p, err := core.BuildLogicalPlanForTest(ctx, s.GetCtx(), stmt, s.GetIS()) require.NoError(b, err) selection := p.(core.LogicalPlan).Children()[0] m := make(core.ExprColumnMap, len(selection.Schema().Columns)) diff --git a/pkg/planner/core/rule_partition_processor.go b/pkg/planner/core/rule_partition_processor.go index 661328691cde7..f3bfe3eaf3007 100644 --- a/pkg/planner/core/rule_partition_processor.go +++ b/pkg/planner/core/rule_partition_processor.go @@ -1081,7 +1081,7 @@ func minCmp(ctx sessionctx.Context, lowVal []types.Datum, columnsPruner *rangeCo return true } // Add Null as point here? - cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx, &lowVal[j], comparer[j]) + cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &lowVal[j], comparer[j]) if err != nil { *gotError = true } @@ -1160,7 +1160,7 @@ func maxCmp(ctx sessionctx.Context, hiVal []types.Datum, columnsPruner *rangeCol return false } // Add Null as point here? 
- cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx, &hiVal[j], comparer[j]) + cmp, err := con.Value.Compare(ctx.GetSessionVars().StmtCtx.TypeCtx(), &hiVal[j], comparer[j]) if err != nil { *gotError = true // error pushed, we will still use the cmp value @@ -1388,7 +1388,7 @@ func partitionRangeForInExpr(sctx sessionctx.Context, args []expression.Expressi partFnConst := replaceColumnWithConst(pruner.partFn, constExpr) val, _, err = partFnConst.EvalInt(sctx, chunk.Row{}) } else { - val, err = constExpr.Value.ToInt64(sctx.GetSessionVars().StmtCtx) + val, err = constExpr.Value.ToInt64(sctx.GetSessionVars().StmtCtx.TypeCtx()) } if err != nil { return pruner.fullRange() diff --git a/pkg/planner/core/rule_predicate_push_down.go b/pkg/planner/core/rule_predicate_push_down.go index d5fbaab074038..9277d3574b2fc 100644 --- a/pkg/planner/core/rule_predicate_push_down.go +++ b/pkg/planner/core/rule_predicate_push_down.go @@ -998,7 +998,7 @@ func (p *LogicalCTE) PredicatePushDown(predicates []expression.Expression, _ *lo // Doesn't support recursive CTE yet. return predicates, p.self } - if !p.isOuterMostCTE { + if !p.cte.isOuterMostCTE { return predicates, p.self } pushedPredicates := make([]expression.Expression, len(predicates)) diff --git a/pkg/planner/core/rule_result_reorder.go b/pkg/planner/core/rule_result_reorder.go index 96e8137389b0e..ed340f831ed60 100644 --- a/pkg/planner/core/rule_result_reorder.go +++ b/pkg/planner/core/rule_result_reorder.go @@ -107,6 +107,9 @@ func (rs *resultReorder) extractHandleCol(lp LogicalPlan) *expression.Column { switch x := lp.(type) { case *LogicalSelection, *LogicalLimit: handleCol := rs.extractHandleCol(lp.Children()[0]) + if handleCol == nil { + return nil // fail to extract handle column from the child, just return nil. 
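// Guard rationale (an assumption from the surrounding code, not stated in the
// diff): when the child chain yields no handle column, the following
// x.Schema().Contains(handleCol) lookup would walk the schema with a nil
// column and could dereference it, so bailing out here simply disables result
// reordering for that subtree.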
+ } if x.Schema().Contains(handleCol) { // some Projection Operator might be inlined, so check the column again here return handleCol diff --git a/pkg/planner/memo/group_test.go b/pkg/planner/memo/group_test.go index 12dd4ca913192..69958e8ec0398 100644 --- a/pkg/planner/memo/group_test.go +++ b/pkg/planner/memo/group_test.go @@ -104,7 +104,7 @@ func TestGroupFingerPrint(t *testing.T) { do := domain.GetDomain(ctx) do.StatsHandle().Close() }() - plan, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) + plan, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) require.NoError(t, err) logic1, ok := plan.(plannercore.LogicalPlan) require.True(t, ok) @@ -259,7 +259,7 @@ func TestBuildKeyInfo(t *testing.T) { // case 1: primary key has constant constraint stmt1, err := p.ParseOneStmt("select a from t where a = 10", "", "") require.NoError(t, err) - p1, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) + p1, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt1, is) require.NoError(t, err) logic1, ok := p1.(plannercore.LogicalPlan) require.True(t, ok) @@ -271,7 +271,7 @@ func TestBuildKeyInfo(t *testing.T) { // case 2: group by column is key stmt2, err := p.ParseOneStmt("select b, sum(a) from t group by b", "", "") require.NoError(t, err) - p2, _, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt2, is) + p2, err := plannercore.BuildLogicalPlanForTest(context.Background(), ctx, stmt2, is) require.NoError(t, err) logic2, ok := p2.(plannercore.LogicalPlan) require.True(t, ok) diff --git a/pkg/planner/optimize.go b/pkg/planner/optimize.go index 5e2f3454f163a..cb85ff4ab9536 100644 --- a/pkg/planner/optimize.go +++ b/pkg/planner/optimize.go @@ -520,6 +520,8 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in return p, names, 0, nil } + core.RecheckCTE(logic) + // Handle the logical plan statement, use cascades planner if enabled. if sessVars.GetEnableCascadesPlanner() { finalPlan, cost, err := cascades.DefaultOptimizer.FindBestPlan(sctx, logic) diff --git a/pkg/planner/util/fixcontrol/get.go b/pkg/planner/util/fixcontrol/get.go index fad759f363cf8..7c8c976e73dca 100644 --- a/pkg/planner/util/fixcontrol/get.go +++ b/pkg/planner/util/fixcontrol/get.go @@ -34,6 +34,8 @@ const ( Fix44855 uint64 = 44855 // Fix45132 controls whether to use access range row count to determine access path on the Skyline pruning. Fix45132 uint64 = 45132 + // Fix45798 controls whether to cache plans that access generated columns. + Fix45798 uint64 = 45798 ) // GetStr fetches the given key from the fix control map as a string type. 
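// How the new CTE recheck is wired in: optimize() above calls RecheckCTE on
// the logical plan right before picking the cascades or classic optimizer,
// and BuildLogicalPlanForTest does the same for unit tests. A hedged example
// of the effect (the SQL is illustrative, not from the diff):
//
//    with cte1 as (select a from t),
//         cte2 as (select * from cte1)
//    select * from cte2 where a > 1;
//
// cte2 is referenced only by the main query, so it keeps isOuterMostCTE and
// LogicalCTE.PredicatePushDown (changed earlier in rule_predicate_push_down.go)
// may push `a > 1` into it; cte1 is demoted because another CTE references it,
// so predicates are not pushed past its reader.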
diff --git a/pkg/privilege/privileges/BUILD.bazel b/pkg/privilege/privileges/BUILD.bazel index 83a6920f734d4..fb8885ca5bbe5 100644 --- a/pkg/privilege/privileges/BUILD.bazel +++ b/pkg/privilege/privileges/BUILD.bazel @@ -76,7 +76,7 @@ go_test( "//pkg/util/dbterror/exeerrors", "//pkg/util/hack", "//pkg/util/sem", - "//pkg/util/sqlexec", + "//pkg/util/sqlescape", "@com_github_lestrrat_go_jwx_v2//jwa", "@com_github_lestrrat_go_jwx_v2//jwk", "@com_github_lestrrat_go_jwx_v2//jws", diff --git a/pkg/privilege/privileges/privileges.go b/pkg/privilege/privileges/privileges.go index d394bacdb69ae..1fe786399f0dd 100644 --- a/pkg/privilege/privileges/privileges.go +++ b/pkg/privilege/privileges/privileges.go @@ -984,7 +984,7 @@ func (passwordLocking *PasswordLocking) ParseJSON(passwordLockingJSON types.Bina if err != nil { return err } - passwordLocking.FailedLoginAttempts = mathutil.Min(passwordLocking.FailedLoginAttempts, math.MaxInt16) + passwordLocking.FailedLoginAttempts = min(passwordLocking.FailedLoginAttempts, math.MaxInt16) passwordLocking.FailedLoginAttempts = mathutil.Max(passwordLocking.FailedLoginAttempts, 0) passwordLocking.PasswordLockTimeDays, err = @@ -992,7 +992,7 @@ func (passwordLocking *PasswordLocking) ParseJSON(passwordLockingJSON types.Bina if err != nil { return err } - passwordLocking.PasswordLockTimeDays = mathutil.Min(passwordLocking.PasswordLockTimeDays, math.MaxInt16) + passwordLocking.PasswordLockTimeDays = min(passwordLocking.PasswordLockTimeDays, math.MaxInt16) passwordLocking.PasswordLockTimeDays = mathutil.Max(passwordLocking.PasswordLockTimeDays, -1) passwordLocking.FailedLoginCount, err = diff --git a/pkg/privilege/privileges/privileges_test.go b/pkg/privilege/privileges/privileges_test.go index 90f406444111b..a8c39ceda572a 100644 --- a/pkg/privilege/privileges/privileges_test.go +++ b/pkg/privilege/privileges/privileges_test.go @@ -47,7 +47,7 @@ import ( "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/sem" - "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/stretchr/testify/require" ) @@ -1435,13 +1435,13 @@ func TestDynamicPrivsRegistration(t *testing.T) { // Check that all privileges registered are assignable to users, // including the recently registered ACDC_ADMIN for _, priv := range privileges.GetDynamicPrivileges() { - sqlGrant, err := sqlexec.EscapeSQL("GRANT %n ON *.* TO privassigntest", priv) + sqlGrant, err := sqlescape.EscapeSQL("GRANT %n ON *.* TO privassigntest", priv) require.NoError(t, err) tk.MustExec(sqlGrant) } // Check that all privileges registered are revokable for _, priv := range privileges.GetDynamicPrivileges() { - sqlGrant, err := sqlexec.EscapeSQL("REVOKE %n ON *.* FROM privassigntest", priv) + sqlGrant, err := sqlescape.EscapeSQL("REVOKE %n ON *.* FROM privassigntest", priv) require.NoError(t, err) tk.MustExec(sqlGrant) } diff --git a/pkg/resourcemanager/OWNERS b/pkg/resourcemanager/OWNERS new file mode 100644 index 0000000000000..93ea188e29c9f --- /dev/null +++ b/pkg/resourcemanager/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-resourcemanager diff --git a/pkg/resourcemanager/pool/spool/BUILD.bazel b/pkg/resourcemanager/pool/spool/BUILD.bazel index 03bec05e729c0..6fa13aa8492ad 100644 --- a/pkg/resourcemanager/pool/spool/BUILD.bazel +++ b/pkg/resourcemanager/pool/spool/BUILD.bazel @@ -15,7 +15,6 @@ go_library( 
"//pkg/resourcemanager/poolmanager", "//pkg/resourcemanager/util", "//pkg/util/logutil", - "//pkg/util/mathutil", "@com_github_prometheus_client_golang//prometheus", "@com_github_sasha_s_go_deadlock//:go-deadlock", "@org_uber_go_zap//:zap", diff --git a/pkg/resourcemanager/pool/spool/spool.go b/pkg/resourcemanager/pool/spool/spool.go index 0982d1f85893f..699860de5db54 100644 --- a/pkg/resourcemanager/pool/spool/spool.go +++ b/pkg/resourcemanager/pool/spool/spool.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb/pkg/resourcemanager/poolmanager" "github.com/pingcap/tidb/pkg/resourcemanager/util" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/prometheus/client_golang/prometheus" "github.com/sasha-s/go-deadlock" "go.uber.org/zap" @@ -197,7 +196,7 @@ func (p *Pool) checkAndAddRunningInternal(concurrency int32) (conc int32, run bo } // if concurrency is 1 , we must return a goroutine // if concurrency is more than 1, we must return at least one goroutine. - result := mathutil.Min(n, concurrency) + result := min(n, concurrency) p.running.Add(result) return result, true } diff --git a/pkg/server/BUILD.bazel b/pkg/server/BUILD.bazel index a8f6075d6b464..f3a551b24bd1b 100644 --- a/pkg/server/BUILD.bazel +++ b/pkg/server/BUILD.bazel @@ -85,6 +85,7 @@ go_library( "//pkg/util/memory", "//pkg/util/printer", "//pkg/util/sqlexec", + "//pkg/util/sqlkiller", "//pkg/util/sys/linux", "//pkg/util/timeutil", "//pkg/util/tls", @@ -173,6 +174,7 @@ go_test( "//pkg/util/chunk", "//pkg/util/dbterror/exeerrors", "//pkg/util/replayer", + "//pkg/util/sqlkiller", "//pkg/util/syncutil", "//pkg/util/topsql/state", "@com_github_docker_go_units//:go-units", diff --git a/pkg/server/conn.go b/pkg/server/conn.go index ee9566dea4448..f8ff7732a834c 100644 --- a/pkg/server/conn.go +++ b/pkg/server/conn.go @@ -98,7 +98,6 @@ import ( "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/memory" tlsutil "github.com/pingcap/tidb/pkg/util/tls" topsqlstate "github.com/pingcap/tidb/pkg/util/topsql/state" "github.com/pingcap/tidb/pkg/util/tracing" @@ -1187,7 +1186,7 @@ func (cc *clientConn) addMetrics(cmd byte, startTime time.Time, err error) { func (cc *clientConn) dispatch(ctx context.Context, data []byte) error { defer func() { // reset killed for each request - atomic.StoreUint32(&cc.ctx.GetSessionVars().Killed, 0) + cc.ctx.GetSessionVars().SQLKiller.Reset() }() t := time.Now() if (cc.ctx.Status() & mysql.ServerStatusInTrans) > 0 { @@ -1252,7 +1251,7 @@ func (cc *clientConn) dispatch(ctx context.Context, data []byte) error { vars := cc.ctx.GetSessionVars() // reset killed for each request - atomic.StoreUint32(&vars.Killed, 0) + vars.SQLKiller.Reset() if cmd < mysql.ComEnd { cc.ctx.SetCommandValue(cmd) } @@ -1586,7 +1585,7 @@ func (cc *clientConn) handleLoadData(ctx context.Context, loadDataWorker *execut // https://dev.mysql.com/doc/dev/mysql-server/latest/page_protocol_com_query_response_local_infile_request.html for !drained { // check kill flag again, let the draining loop could quit if empty packet could not be received - if atomic.CompareAndSwapUint32(&loadDataWorker.UserSctx.GetSessionVars().Killed, 1, 0) { + if atomic.CompareAndSwapUint32(&loadDataWorker.UserSctx.GetSessionVars().SQLKiller.Signal, 1, 0) { logutil.Logger(ctx).Warn("receiving kill, stop draining data, connection may be reset") return exeerrors.ErrQueryInterrupted } @@ -2149,11 +2148,15 @@ func 
(cc *clientConn) writeResultSet(ctx context.Context, rs resultset.ResultSet if r == nil { return } - if str, ok := r.(string); !ok || !strings.HasPrefix(str, memory.PanicMemoryExceedWarnMsg) { + if recoveredErr, ok := r.(error); !ok || !(exeerrors.ErrMemoryExceedForQuery.Equal(recoveredErr) || + exeerrors.ErrMemoryExceedForInstance.Equal(recoveredErr) || + exeerrors.ErrQueryInterrupted.Equal(recoveredErr) || + exeerrors.ErrMaxExecTimeExceeded.Equal(recoveredErr)) { panic(r) + } else { + runErr = recoveredErr } // TODO(jianzhang.zj: add metrics here) - runErr = errors.Errorf("%v", r) logutil.Logger(ctx).Error("write query result panic", zap.Stringer("lastSQL", getLastStmtInConn{cc}), zap.Stack("stack"), zap.Any("recover", r)) }() cc.initResultEncoder(ctx) diff --git a/pkg/server/conn_test.go b/pkg/server/conn_test.go index 4813508c532a7..78b7ecd659f61 100644 --- a/pkg/server/conn_test.go +++ b/pkg/server/conn_test.go @@ -51,6 +51,7 @@ import ( "github.com/pingcap/tidb/pkg/util/arena" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/stretchr/testify/require" tikverr "github.com/tikv/client-go/v2/error" "github.com/tikv/client-go/v2/testutils" @@ -720,14 +721,14 @@ func TestConnExecutionTimeout(t *testing.T) { require.Equal(t, "[executor:3024]Query execution was interrupted, maximum statement execution time exceeded", err.Error()) // Killed because of max execution time, reset Killed to 0. - atomic.CompareAndSwapUint32(&tk.Session().GetSessionVars().Killed, 2, 0) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.MaxExecTimeExceeded) tk.MustExec("set @@max_execution_time = 0;") tk.MustQuery("select * FROM testTable2 WHERE SLEEP(1);").Check(testkit.Rows()) err = tk.QueryToErr("select /*+ MAX_EXECUTION_TIME(100)*/ * FROM testTable2 WHERE SLEEP(1);") require.Equal(t, "[executor:3024]Query execution was interrupted, maximum statement execution time exceeded", err.Error()) // Killed because of max execution time, reset Killed to 0. - atomic.CompareAndSwapUint32(&tk.Session().GetSessionVars().Killed, 2, 0) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.MaxExecTimeExceeded) err = cc.handleQuery(context.Background(), "select * FROM testTable2 WHERE SLEEP(1);") require.NoError(t, err) @@ -735,7 +736,7 @@ func TestConnExecutionTimeout(t *testing.T) { require.Equal(t, "[executor:3024]Query execution was interrupted, maximum statement execution time exceeded", err.Error()) // Killed because of max execution time, reset Killed to 0.
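// The kill-handling migration in this file replaces the raw
// SessionVars.Killed uint32 with the typed sqlkiller API. A condensed sketch
// using only calls that appear in this diff:
//
//    vars := tk.Session().GetSessionVars()
//    vars.SQLKiller.SendKillSignal(sqlkiller.MaxExecTimeExceeded) // timeout path
//    vars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted)    // KILL QUERY path
//    vars.SQLKiller.Reset() // dispatch() clears the signal per request
//
// server.go's killQuery picks the signal, and writeResultSet above now
// recognizes the resulting typed errors instead of matching a panic string.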
- atomic.CompareAndSwapUint32(&tk.Session().GetSessionVars().Killed, 2, 0) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.MaxExecTimeExceeded) tk.MustExec("set @@max_execution_time = 500;") err = cc.handleQuery(context.Background(), "alter table testTable2 add index idx(age);") diff --git a/pkg/server/handler/optimizor/statistics_handler.go b/pkg/server/handler/optimizor/statistics_handler.go index 1c7b56f14aaa9..4af77791cd901 100644 --- a/pkg/server/handler/optimizor/statistics_handler.go +++ b/pkg/server/handler/optimizor/statistics_handler.go @@ -111,7 +111,7 @@ func (sh StatsHistoryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request } se.GetSessionVars().StmtCtx.SetTimeZone(time.Local) - t, err := types.ParseTime(se.GetSessionVars().StmtCtx, params[handler.Snapshot], mysql.TypeTimestamp, 6, nil) + t, err := types.ParseTime(se.GetSessionVars().StmtCtx.TypeCtx(), params[handler.Snapshot], mysql.TypeTimestamp, 6, nil) if err != nil { handler.WriteError(w, err) return diff --git a/pkg/server/handler/tikv_handler.go b/pkg/server/handler/tikv_handler.go index 5dd0c8af3f4c4..b5e72c1f5c508 100644 --- a/pkg/server/handler/tikv_handler.go +++ b/pkg/server/handler/tikv_handler.go @@ -160,7 +160,7 @@ func (*TikvHandlerTool) formValue2DatumRow(sc *stmtctx.StatementContext, values data[i].SetNull() case 1: bDatum := types.NewStringDatum(vals[0]) - cDatum, err := bDatum.ConvertTo(sc, &col.FieldType) + cDatum, err := bDatum.ConvertTo(sc.TypeCtx(), &col.FieldType) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/server/handler/tikvhandler/tikv_handler.go b/pkg/server/handler/tikvhandler/tikv_handler.go index 3cfe18cc2e7e0..a215221da1f82 100644 --- a/pkg/server/handler/tikvhandler/tikv_handler.go +++ b/pkg/server/handler/tikvhandler/tikv_handler.go @@ -1201,7 +1201,7 @@ func (h *TableHandler) addScatterSchedule(startKey, endKey []byte, name string) if err != nil { return err } - scheduleURL := fmt.Sprintf("%s://%s/pd/api/v1/schedulers", util.InternalHTTPSchema(), pdAddrs[0]) + scheduleURL := fmt.Sprintf("%s://%s%s", util.InternalHTTPSchema(), pdAddrs[0], pdapi.Schedulers) resp, err := util.InternalHTTPClient().Post(scheduleURL, "application/json", bytes.NewBuffer(v)) if err != nil { return err @@ -1217,7 +1217,7 @@ func (h *TableHandler) deleteScatterSchedule(name string) error { if err != nil { return err } - scheduleURL := fmt.Sprintf("%s://%s/pd/api/v1/schedulers/scatter-range-%s", util.InternalHTTPSchema(), pdAddrs[0], name) + scheduleURL := fmt.Sprintf("%s://%s%s", util.InternalHTTPSchema(), pdAddrs[0], pdapi.ScatterRangeSchedulerWithName(name)) req, err := http.NewRequest(http.MethodDelete, scheduleURL, nil) if err != nil { return err diff --git a/pkg/server/internal/column/BUILD.bazel b/pkg/server/internal/column/BUILD.bazel index 794b8a607f50b..fc758df83711f 100644 --- a/pkg/server/internal/column/BUILD.bazel +++ b/pkg/server/internal/column/BUILD.bazel @@ -40,7 +40,6 @@ go_test( "//pkg/server/internal/util", "//pkg/types", "//pkg/util/chunk", - "//pkg/util/mock", "@com_github_stretchr_testify//require", ], ) diff --git a/pkg/server/internal/column/column_test.go b/pkg/server/internal/column/column_test.go index b32d07b028f02..ba63d52a5b9d8 100644 --- a/pkg/server/internal/column/column_test.go +++ b/pkg/server/internal/column/column_test.go @@ -23,7 +23,6 @@ import ( "github.com/pingcap/tidb/pkg/server/internal/util" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" - "github.com/pingcap/tidb/pkg/util/mock" 
"github.com/stretchr/testify/require" ) @@ -181,13 +180,11 @@ func TestDumpTextValue(t *testing.T) { var d types.Datum - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) - time, err := types.ParseTime(sc, "2017-01-05 23:59:59.575601", mysql.TypeDatetime, 0, nil) + time, err := types.ParseTime(typeCtx, "2017-01-05 23:59:59.575601", mysql.TypeDatetime, 0, nil) require.NoError(t, err) d.SetMysqlTime(time) columns[0].Type = mysql.TypeDatetime @@ -195,7 +192,7 @@ func TestDumpTextValue(t *testing.T) { require.NoError(t, err) require.Equal(t, "2017-01-06 00:00:00", mustDecodeStr(t, bs)) - duration, _, err := types.ParseDuration(sc, "11:30:45", 0) + duration, _, err := types.ParseDuration(typeCtx, "11:30:45", 0) require.NoError(t, err) d.SetMysqlDuration(duration) columns[0].Type = mysql.TypeDuration diff --git a/pkg/server/internal/dump/BUILD.bazel b/pkg/server/internal/dump/BUILD.bazel index df6051b402bbf..8529ba1648901 100644 --- a/pkg/server/internal/dump/BUILD.bazel +++ b/pkg/server/internal/dump/BUILD.bazel @@ -19,7 +19,6 @@ go_test( flaky = True, shard_count = 3, deps = [ - "//pkg/sessionctx/stmtctx", "//pkg/types", "@com_github_stretchr_testify//require", ], diff --git a/pkg/server/internal/dump/dump_test.go b/pkg/server/internal/dump/dump_test.go index 1c2b67d8da555..0968c34b10114 100644 --- a/pkg/server/internal/dump/dump_test.go +++ b/pkg/server/internal/dump/dump_test.go @@ -18,52 +18,51 @@ import ( "testing" "time" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/stretchr/testify/require" ) func TestDumpBinaryTime(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - parsedTime, err := types.ParseTimestamp(sc, "0000-00-00 00:00:00.000000") + typeCtx := types.DefaultStmtNoWarningContext + parsedTime, err := types.ParseTimestamp(typeCtx, "0000-00-00 00:00:00.000000") require.NoError(t, err) d := BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.Local), "1991-05-01 01:01:01.100001") + parsedTime, err = types.ParseTimestamp(typeCtx.WithLocation(time.Local), "1991-05-01 01:01:01.100001") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 199 & 7 composed to uint16 1991 (litter-endian) // 160 & 134 & 1 & 0 composed to uint32 1000001 (litter-endian) require.Equal(t, []byte{11, 199, 7, 5, 1, 1, 1, 1, 161, 134, 1, 0}, d) - parsedTime, err = types.ParseDatetime(sc, "0000-00-00 00:00:00.000000") + parsedTime, err = types.ParseDatetime(typeCtx, "0000-00-00 00:00:00.000000") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseDatetime(sc, "1993-07-13 01:01:01.000000") + parsedTime, err = types.ParseDatetime(typeCtx, "1993-07-13 01:01:01.000000") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 201 & 7 composed to uint16 1993 (litter-endian) require.Equal(t, []byte{7, 201, 7, 7, 13, 1, 1, 1}, d) - parsedTime, err = types.ParseDate(sc, "0000-00-00") + parsedTime, err = types.ParseDate(typeCtx, "0000-00-00") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - parsedTime, err = types.ParseDate(sc, "1992-06-01") + parsedTime, err = types.ParseDate(typeCtx, 
"1992-06-01") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) // 200 & 7 composed to uint16 1992 (litter-endian) require.Equal(t, []byte{4, 200, 7, 6, 1}, d) - parsedTime, err = types.ParseDate(sc, "0000-00-00") + parsedTime, err = types.ParseDate(typeCtx, "0000-00-00") require.NoError(t, err) d = BinaryDateTime(nil, parsedTime) require.Equal(t, []byte{0}, d) - myDuration, _, err := types.ParseDuration(sc, "0000-00-00 00:00:00.000000", 6) + myDuration, _, err := types.ParseDuration(typeCtx, "0000-00-00 00:00:00.000000", 6) require.NoError(t, err) d = BinaryTime(myDuration.Duration) require.Equal(t, []byte{0}, d) diff --git a/pkg/server/rpc_server.go b/pkg/server/rpc_server.go index 4145cfbdbec9b..64225ccaca210 100644 --- a/pkg/server/rpc_server.go +++ b/pkg/server/rpc_server.go @@ -236,7 +236,7 @@ func (s *rpcServer) createSession() (session.Session, error) { vars.StmtCtx.InitMemTracker(memory.LabelForSQLText, -1) vars.StmtCtx.MemTracker.AttachTo(vars.MemTracker) if variable.OOMAction.Load() == variable.OOMActionCancel { - action := &memory.PanicOnExceed{} + action := &memory.PanicOnExceed{Killer: &vars.SQLKiller} vars.MemTracker.SetActionOnExceed(action) } se.SetSessionManager(s.sm) diff --git a/pkg/server/server.go b/pkg/server/server.go index e03350cb4dd0c..6b3a08ceef8c1 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -69,6 +69,7 @@ import ( "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/fastrand" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/pkg/util/sys/linux" "github.com/pingcap/tidb/pkg/util/timeutil" uatomic "go.uber.org/atomic" @@ -889,9 +890,9 @@ func (s *Server) GetTLSConfig() *tls.Config { func killQuery(conn *clientConn, maxExecutionTime bool) { sessVars := conn.ctx.GetSessionVars() if maxExecutionTime { - atomic.StoreUint32(&sessVars.Killed, 2) + sessVars.SQLKiller.SendKillSignal(sqlkiller.MaxExecTimeExceeded) } else { - atomic.StoreUint32(&sessVars.Killed, 1) + sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) } conn.mu.RLock() cancelFunc := conn.mu.cancelFunc diff --git a/pkg/session/BUILD.bazel b/pkg/session/BUILD.bazel index 2454f7028edfb..8cc1ed1a24e21 100644 --- a/pkg/session/BUILD.bazel +++ b/pkg/session/BUILD.bazel @@ -85,11 +85,11 @@ go_library( "//pkg/util/kvcache", "//pkg/util/logutil", "//pkg/util/logutil/consistency", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/parser", "//pkg/util/sem", "//pkg/util/sli", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "//pkg/util/syncutil", "//pkg/util/tableutil", diff --git a/pkg/session/bootstrap.go b/pkg/session/bootstrap.go index 3888ebe096165..bc3b231aa5787 100644 --- a/pkg/session/bootstrap.go +++ b/pkg/session/bootstrap.go @@ -53,6 +53,7 @@ import ( "github.com/pingcap/tidb/pkg/util/intest" "github.com/pingcap/tidb/pkg/util/logutil" utilparser "github.com/pingcap/tidb/pkg/util/parser" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/pkg/util/timeutil" "go.etcd.io/etcd/client/v3/concurrency" @@ -183,7 +184,7 @@ const ( // Maybe we will put it back to INFORMATION_SCHEMA. CreateGlobalVariablesTable = `CREATE TABLE IF NOT EXISTS mysql.GLOBAL_VARIABLES( VARIABLE_NAME VARCHAR(64) NOT NULL PRIMARY KEY, - VARIABLE_VALUE VARCHAR(1024) DEFAULT NULL);` + VARIABLE_VALUE VARCHAR(16383) DEFAULT NULL);` // CreateTiDBTable is the SQL statement creates a table in system db. 
// This table is a key-value struct contains some information used by TiDB. // Currently we only put bootstrapped in it which indicates if the system is already bootstrapped. @@ -751,6 +752,8 @@ const ( // The variable name in mysql.tidb table and it records the default value of // oom-action when upgrade from v3.0.x to v4.0.11+. tidbDefOOMAction = "default_oom_action" + // The variable name in mysql.tidb table and it records the current DDLTableVersion. + tidbDDLTableVersion = "ddl_table_version" // Const for TiDB server version 2. version2 = 2 version3 = 3 @@ -1014,11 +1017,19 @@ const ( // version 177 // add `mysql.dist_framework_meta` version177 = 177 + + // version 178 + // write mDDLTableVersion into `mysql.tidb` table + version178 = 178 + + // version 179 + // enlarge `VARIABLE_VALUE` of `mysql.global_variables` from `varchar(1024)` to `varchar(16383)`. + version179 = 179 ) // currentBootstrapVersion is defined as a variable, so we can modify its value for testing. // please make sure this is the largest version -var currentBootstrapVersion int64 = version177 +var currentBootstrapVersion int64 = version179 // DDL owner key's expired time is ManagerSessionTTL seconds, we should wait the time and give more time to have a chance to finish it. var internalSQLTimeout = owner.ManagerSessionTTL + 15 @@ -1171,6 +1182,8 @@ var ( upgradeToVer175, upgradeToVer176, upgradeToVer177, + upgradeToVer178, + upgradeToVer179, } ) @@ -2840,6 +2853,43 @@ func upgradeToVer177(s Session, ver int64) { } // ignore error when upgrading from v7.4 to higher version. doReentrantDDL(s, CreateDistFrameworkMeta, infoschema.ErrTableExists) + err := s.GetSessionVars().GlobalVarsAccessor.SetGlobalSysVar(context.Background(), variable.TiDBEnableAsyncMergeGlobalStats, variable.Off) + if err != nil { + logutil.BgLogger().Fatal("upgradeToVer177 error", zap.Error(err)) + } +} + +// writeDDLTableVersion writes mDDLTableVersion into mysql.tidb +func writeDDLTableVersion(s Session) { + var err error + var ddlTableVersion meta.DDLTableVersion + err = kv.RunInNewTxn(kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap), s.GetStore(), true, func(ctx context.Context, txn kv.Transaction) error { + t := meta.NewMeta(txn) + ddlTableVersion, err = t.CheckDDLTableVersion() + return err + }) + terror.MustNil(err) + mustExecute(s, `INSERT HIGH_PRIORITY INTO %n.%n VALUES (%?, %?, "DDL Table Version. 
Do not delete.") ON DUPLICATE KEY UPDATE VARIABLE_VALUE= %?`, + mysql.SystemDB, + mysql.TiDBTable, + tidbDDLTableVersion, + ddlTableVersion, + ddlTableVersion, + ) +} + +func upgradeToVer178(s Session, ver int64) { + if ver >= version178 { + return + } + writeDDLTableVersion(s) +} + +func upgradeToVer179(s Session, ver int64) { + if ver >= version179 { + return + } + doReentrantDDL(s, "ALTER TABLE mysql.global_variables MODIFY COLUMN `VARIABLE_VALUE` varchar(16383)") } func writeOOMAction(s Session) { @@ -3078,7 +3128,7 @@ func doDMLWorks(s Session) { } // sanitize k and vVal - value := fmt.Sprintf(`("%s", "%s")`, sqlexec.EscapeString(k), sqlexec.EscapeString(vVal)) + value := fmt.Sprintf(`("%s", "%s")`, sqlescape.EscapeString(k), sqlescape.EscapeString(vVal)) values = append(values, value) } sql := fmt.Sprintf("INSERT HIGH_PRIORITY INTO %s.%s VALUES %s;", mysql.SystemDB, mysql.GlobalVariablesTable, @@ -3098,6 +3148,8 @@ func doDMLWorks(s Session) { writeStmtSummaryVars(s) + writeDDLTableVersion(s) + ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnBootstrap) _, err := s.ExecuteInternal(ctx, "COMMIT") if err != nil { diff --git a/pkg/session/bootstrap_test.go b/pkg/session/bootstrap_test.go index 6b37120bec02b..e23b15fca0326 100644 --- a/pkg/session/bootstrap_test.go +++ b/pkg/session/bootstrap_test.go @@ -2118,3 +2118,110 @@ func TestTiDBUpgradeToVer177(t *testing.T) { MustExec(t, seV176, "SELECT * from mysql.dist_framework_meta") dom.Close() } + +func TestWriteDDLTableVersionToMySQLTiDB(t *testing.T) { + ctx := context.Background() + store, dom := CreateStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + ddlTableVer, err := m.CheckDDLTableVersion() + require.NoError(t, err) + + // Verify that 'ddl_table_version' has been set to the correct value + se := CreateSessionAndSetID(t, store) + r := MustExecToRecodeSet(t, se, fmt.Sprintf(`SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME='%s'`, tidbDDLTableVersion)) + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, req.NumRows()) + require.Equal(t, []byte(fmt.Sprintf("%d", ddlTableVer)), req.GetRow(0).GetBytes(0)) + require.NoError(t, r.Close()) + dom.Close() +} + +func TestWriteDDLTableVersionToMySQLTiDBWhenUpgradingTo178(t *testing.T) { + ctx := context.Background() + store, _ := CreateStoreAndBootstrap(t) + defer func() { require.NoError(t, store.Close()) }() + + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + ddlTableVer, err := m.CheckDDLTableVersion() + require.NoError(t, err) + + // bootstrap as version177 + ver177 := version177 + seV177 := CreateSessionAndSetID(t, store) + err = m.FinishBootstrap(int64(ver177)) + require.NoError(t, err) + MustExec(t, seV177, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver177)) + // remove the ddl_table_version entry from mysql.tidb table + MustExec(t, seV177, fmt.Sprintf("delete from mysql.tidb where VARIABLE_NAME='%s'", tidbDDLTableVersion)) + err = txn.Commit(ctx) + require.NoError(t, err) + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV177) + require.NoError(t, err) + require.Equal(t, int64(ver177), ver) + + // upgrade to current version + domCurVer, err := BootstrapSession(store) + require.NoError(t, err) + defer domCurVer.Close() + seCurVer := CreateSessionAndSetID(t, store) + ver, err = 
getBootstrapVersion(seCurVer) + require.NoError(t, err) + require.Equal(t, currentBootstrapVersion, ver) + + // check if the DDLTableVersion has been set in the `mysql.tidb` table during upgrade + r := MustExecToRecodeSet(t, seCurVer, fmt.Sprintf(`SELECT VARIABLE_VALUE from mysql.TiDB where VARIABLE_NAME='%s'`, tidbDDLTableVersion)) + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 1, req.NumRows()) + require.Equal(t, []byte(fmt.Sprintf("%d", ddlTableVer)), req.GetRow(0).GetBytes(0)) + require.NoError(t, r.Close()) +} + +func TestTiDBUpgradeToVer179(t *testing.T) { + ctx := context.Background() + store, _ := CreateStoreAndBootstrap(t) + defer func() { + require.NoError(t, store.Close()) + }() + ver178 := version178 + seV178 := CreateSessionAndSetID(t, store) + txn, err := store.Begin() + require.NoError(t, err) + m := meta.NewMeta(txn) + err = m.FinishBootstrap(int64(ver178)) + require.NoError(t, err) + MustExec(t, seV178, fmt.Sprintf("update mysql.tidb set variable_value=%d where variable_name='tidb_server_version'", ver178)) + err = txn.Commit(context.Background()) + require.NoError(t, err) + + unsetStoreBootstrapped(store.UUID()) + ver, err := getBootstrapVersion(seV178) + require.NoError(t, err) + require.Equal(t, int64(ver178), ver) + + dom, err := BootstrapSession(store) + require.NoError(t, err) + ver, err = getBootstrapVersion(seV178) + require.NoError(t, err) + require.Less(t, int64(ver178), ver) + + r := MustExecToRecodeSet(t, seV178, "desc mysql.global_variables") + req := r.NewChunk(nil) + err = r.Next(ctx, req) + require.NoError(t, err) + require.Equal(t, 2, req.NumRows()) + require.Equal(t, []byte("varchar(16383)"), req.GetRow(1).GetBytes(1)) + require.NoError(t, r.Close()) + + dom.Close() +} diff --git a/pkg/session/bootstraptest/bootstrap_upgrade_test.go b/pkg/session/bootstraptest/bootstrap_upgrade_test.go index d4f681d4f58a1..da38f275afe76 100644 --- a/pkg/session/bootstraptest/bootstrap_upgrade_test.go +++ b/pkg/session/bootstraptest/bootstrap_upgrade_test.go @@ -605,22 +605,28 @@ func TestUpgradeVersionForResumeJob(t *testing.T) { wg.Wait() // Make sure the second add index operation is successful. - sql := fmt.Sprintf("select job_meta from mysql.tidb_ddl_history where job_id=%d or job_id=%d order by job_id", jobID, jobID+1) + sql := fmt.Sprintf("select job_meta from mysql.tidb_ddl_history where job_id >=%d order by job_id", jobID) rows, err := execute(context.Background(), seLatestV, sql) require.NoError(t, err) - require.Len(t, rows, 2) + require.GreaterOrEqual(t, len(rows), 2) var idxFinishTS uint64 for i, row := range rows { jobBinary := row.GetBytes(0) runJob := model.Job{} err := runJob.Decode(jobBinary) require.NoError(t, err) - require.True(t, strings.Contains(runJob.TableName, "upgrade_tbl")) require.Equal(t, model.JobStateSynced.String(), runJob.State.String()) if i == 0 { + // The first add index op. idxFinishTS = runJob.BinlogInfo.FinishedTS } else { - require.Greater(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + // The second add index op. + if strings.Contains(runJob.TableName, "upgrade_tbl") { + require.Greater(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + } else { + // The upgrade DDL ops. These jobs' finishedTS must be less than the add index ops'.
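// Why the assertion below holds: the bootstrap upgrade DDLs (for example
// upgradeToVer179's ALTER TABLE on mysql.global_variables) finish while the
// resumed "add index" jobs are still pending, so any history row that does not
// touch upgrade_tbl must carry a FinishedTS below the first index job's: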
+ require.Less(t, runJob.BinlogInfo.FinishedTS, idxFinishTS) + } } } } diff --git a/pkg/session/nontransactional.go b/pkg/session/nontransactional.go index 05cedbb096f34..197643c48ebb3 100644 --- a/pkg/session/nontransactional.go +++ b/pkg/session/nontransactional.go @@ -40,7 +40,6 @@ import ( "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" @@ -510,7 +509,7 @@ func buildShardJobs(ctx context.Context, stmt *ast.NonTransactionalDMLStmt, se S } newEnd := row.GetDatum(0, &rs.Fields()[0].Column.FieldType) if currentSize >= batchSize { - cmp, err := newEnd.Compare(se.GetSessionVars().StmtCtx, ¤tEnd, collate.GetCollator(shardColumnCollate)) + cmp, err := newEnd.Compare(se.GetSessionVars().StmtCtx.TypeCtx(), ¤tEnd, collate.GetCollator(shardColumnCollate)) if err != nil { return nil, err } @@ -850,5 +849,5 @@ func buildExecuteResults(ctx context.Context, jobs []job, maxChunkSize int, reda zap.Int("num_failed_jobs", len(failedJobs)), zap.String("failed_jobs", errStr)) return nil, fmt.Errorf("%d/%d jobs failed in the non-transactional DML: %s, ...(more in logs)", - len(failedJobs), len(jobs), errStr[:mathutil.Min(500, len(errStr)-1)]) + len(failedJobs), len(jobs), errStr[:min(500, len(errStr)-1)]) } diff --git a/pkg/session/session.go b/pkg/session/session.go index 4fc30d8786929..f80cc6d4fccde 100644 --- a/pkg/session/session.go +++ b/pkg/session/session.go @@ -94,10 +94,10 @@ import ( "github.com/pingcap/tidb/pkg/util/kvcache" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/logutil/consistency" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/sem" "github.com/pingcap/tidb/pkg/util/sli" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/pkg/util/syncutil" "github.com/pingcap/tidb/pkg/util/tableutil" @@ -1718,7 +1718,7 @@ func (s *session) Parse(ctx context.Context, sql string) ([]ast.StmtNode, error) func (s *session) ParseWithParams(ctx context.Context, sql string, args ...interface{}) (ast.StmtNode, error) { var err error if len(args) > 0 { - sql, err = sqlexec.EscapeSQL(sql, args...) + sql, err = sqlescape.EscapeSQL(sql, args...) if err != nil { return nil, err } @@ -1742,7 +1742,7 @@ func (s *session) ParseWithParams(ctx context.Context, sql string, args ...inter } if err != nil { s.rollbackOnError(ctx) - logSQL := sql[:mathutil.Min(500, len(sql))] + logSQL := sql[:min(500, len(sql))] if s.sessionVars.EnableRedactLog { logutil.Logger(ctx).Debug("parse SQL failed", zap.Error(err), zap.String("SQL", logSQL)) } else { @@ -2870,9 +2870,9 @@ func autolockAction(s *session, passwordLocking *privileges.PasswordLocking, use func (s *session) passwordLocking(user string, host string, newAttributesStr string) error { sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) - sqlexec.MustFormatSQL(sql, "user_attributes=json_merge_patch(coalesce(user_attributes, '{}'), %?)", newAttributesStr) - sqlexec.MustFormatSQL(sql, " WHERE Host=%? 
and User=%?;", host, user) + sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET ", mysql.SystemDB, mysql.UserTable) + sqlescape.MustFormatSQL(sql, "user_attributes=json_merge_patch(coalesce(user_attributes, '{}'), %?)", newAttributesStr) + sqlescape.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", host, user) ctx := kv.WithInternalSourceType(context.Background(), kv.InternalTxnPrivilege) _, err := s.ExecuteInternal(ctx, sql.String()) return err diff --git a/pkg/session/sync_upgrade.go b/pkg/session/sync_upgrade.go index 97f1a01b8589e..384a505fae871 100644 --- a/pkg/session/sync_upgrade.go +++ b/pkg/session/sync_upgrade.go @@ -61,11 +61,11 @@ func SyncUpgradeState(s sessionctx.Context, timeout time.Duration) error { childCtx, cancel := context.WithTimeout(ctx, 3*time.Second) op, err = owner.GetOwnerOpValue(childCtx, dom.EtcdClient(), ddl.DDLOwnerKey, "upgrade bootstrap") cancel() - if err == nil && op.String() == owner.OpGetUpgradingState.String() { + if err == nil && op.IsSyncedUpgradingState() { break } if i%10 == 0 { - logger.Warn("get owner op failed", zap.Stringer("state", op), zap.Error(err)) + logger.Warn("get owner op failed", zap.Stringer("op", op), zap.Error(err)) } time.Sleep(interval) } diff --git a/pkg/session/test/BUILD.bazel b/pkg/session/test/BUILD.bazel index 6bbcc53ed021d..328aadcb5b3b4 100644 --- a/pkg/session/test/BUILD.bazel +++ b/pkg/session/test/BUILD.bazel @@ -27,6 +27,7 @@ go_test( "//pkg/testkit/testutil", "//pkg/types", "//pkg/util", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_kvproto//pkg/coprocessor", diff --git a/pkg/session/test/meta/session_test.go b/pkg/session/test/meta/session_test.go index 055b036b61434..e3be6077e1c82 100644 --- a/pkg/session/test/meta/session_test.go +++ b/pkg/session/test/meta/session_test.go @@ -155,9 +155,9 @@ func TestInformationSchemaCreateTime(t *testing.T) { ret1 := tk.MustQuery("select create_time from information_schema.tables where table_name='t';") ret2 := tk.MustQuery("show table status like 't'") require.Equal(t, ret2.Rows()[0][11].(string), ret1.Rows()[0][0].(string)) - typ1, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string)) + typ1, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret.Rows()[0][0].(string)) require.NoError(t, err) - typ2, err := types.ParseDatetime(nil, ret1.Rows()[0][0].(string)) + typ2, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret1.Rows()[0][0].(string)) require.NoError(t, err) r := typ2.Compare(typ1) require.Equal(t, 1, r) @@ -166,7 +166,7 @@ func TestInformationSchemaCreateTime(t *testing.T) { ret = tk.MustQuery(`select create_time from information_schema.tables where table_name='t'`) ret2 = tk.MustQuery(`show table status like 't'`) require.Equal(t, ret2.Rows()[0][11].(string), ret.Rows()[0][0].(string)) - typ3, err := types.ParseDatetime(nil, ret.Rows()[0][0].(string)) + typ3, err := types.ParseDatetime(types.DefaultStmtNoWarningContext, ret.Rows()[0][0].(string)) require.NoError(t, err) // Asia/Shanghai 2022-02-17 17:40:05 > Europe/Amsterdam 2022-02-17 10:40:05 r = typ2.Compare(typ3) diff --git a/pkg/session/test/session_test.go b/pkg/session/test/session_test.go index 1290de0691a70..aaf1bd67a4858 100644 --- a/pkg/session/test/session_test.go +++ b/pkg/session/test/session_test.go @@ -42,6 +42,7 @@ import ( "github.com/pingcap/tidb/pkg/testkit/testutil" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" + "github.com/pingcap/tidb/pkg/util/sqlescape" 
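Throughout the patch, call sites move from sqlexec's escaping helpers to the new pkg/util/sqlescape package; the placeholder grammar is unchanged (%n for identifiers, %? for values). A short sketch of the API as used at the passwordLocking and doDMLWorks call sites above (the table and arguments here are made up):

```go
// %n escapes identifiers, %? escapes values. MustFormatSQL appends the
// formatted statement to a strings.Builder, while EscapeSQL returns the
// escaped statement and an error.
sql := new(strings.Builder)
sqlescape.MustFormatSQL(sql, "UPDATE %n.%n SET user_attributes=%?", "mysql", "user", `{"locked": true}`)
sqlescape.MustFormatSQL(sql, " WHERE Host=%? and User=%?;", "%", "root")
```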
"github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/tikvrpc" @@ -727,9 +728,9 @@ func TestRandomBinary(t *testing.T) { var val string for i, bytes := range allBytes { if i == 0 { - val += sqlexec.MustEscapeSQL("(874, 0, 1, %?, 3)", bytes) + val += sqlescape.MustEscapeSQL("(874, 0, 1, %?, 3)", bytes) } else { - val += sqlexec.MustEscapeSQL(",(874, 0, 1, %?, 3)", bytes) + val += sqlescape.MustEscapeSQL(",(874, 0, 1, %?, 3)", bytes) } } sql += val diff --git a/pkg/session/test/variable/BUILD.bazel b/pkg/session/test/variable/BUILD.bazel index 5ac4b9fb2415f..380a6dff7e4b8 100644 --- a/pkg/session/test/variable/BUILD.bazel +++ b/pkg/session/test/variable/BUILD.bazel @@ -18,6 +18,7 @@ go_test( "//pkg/testkit", "//pkg/testkit/testmain", "//pkg/testkit/testsetup", + "//pkg/util/dbterror/exeerrors", "//pkg/util/memory", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", diff --git a/pkg/session/test/variable/variable_test.go b/pkg/session/test/variable/variable_test.go index 5e5adbf023d4d..f4a57c23abcca 100644 --- a/pkg/session/test/variable/variable_test.go +++ b/pkg/session/test/variable/variable_test.go @@ -26,6 +26,7 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/store/copr" "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/memory" "github.com/stretchr/testify/require" ) @@ -125,7 +126,7 @@ func TestCoprocessorOOMAction(t *testing.T) { tk.MustExec(fmt.Sprintf("set @@tidb_mem_quota_query=%v;", quota)) err := tk.QueryToErr(sql) require.Error(t, err) - require.Regexp(t, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery, err) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) } require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/store/copr/testRateLimitActionMockWaitMax", `return(true)`)) @@ -172,7 +173,7 @@ func TestCoprocessorOOMAction(t *testing.T) { tk.MustExec("set @@tidb_mem_quota_query=1;") err = tk.QueryToErr(testcase.sql) require.Error(t, err) - require.Regexp(t, memory.PanicMemoryExceedWarnMsg+memory.WarnMsgSuffixForSingleQuery, err) + require.True(t, exeerrors.ErrMemoryExceedForQuery.Equal(err)) se.Close() } } diff --git a/pkg/sessionctx/stmtctx/BUILD.bazel b/pkg/sessionctx/stmtctx/BUILD.bazel index 1178d300448ea..f768b8c2eb7a1 100644 --- a/pkg/sessionctx/stmtctx/BUILD.bazel +++ b/pkg/sessionctx/stmtctx/BUILD.bazel @@ -12,7 +12,7 @@ go_library( "//pkg/parser/model", "//pkg/parser/mysql", "//pkg/parser/terror", - "//pkg/types/context", + "//pkg/types", "//pkg/util/disk", "//pkg/util/execdetails", "//pkg/util/intest", @@ -47,7 +47,6 @@ go_test( "//pkg/testkit", "//pkg/testkit/testsetup", "//pkg/types", - "//pkg/types/context", "//pkg/util/execdetails", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//require", diff --git a/pkg/sessionctx/stmtctx/stmtctx.go b/pkg/sessionctx/stmtctx/stmtctx.go index 0a181e325f8f5..2216d05ef3f99 100644 --- a/pkg/sessionctx/stmtctx/stmtctx.go +++ b/pkg/sessionctx/stmtctx/stmtctx.go @@ -34,7 +34,7 @@ import ( "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - typectx "github.com/pingcap/tidb/pkg/types/context" + "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/disk" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/intest" @@ -156,7 +156,7 @@ type 
StatementContext struct { _ constructor.Constructor `ctor:"NewStmtCtx,NewStmtCtxWithTimeZone,Reset"` // typeCtx is used to indicate how to make the type conversion. - typeCtx typectx.Context + typeCtx types.Context // Set the following variables before execution StmtHints @@ -176,8 +176,6 @@ type StatementContext struct { InCreateOrAlterStmt bool InSetSessionStatesStmt bool InPreparedPlanBuilding bool - IgnoreZeroInDate bool - NoZeroDate bool DupKeyAsWarning bool BadNullAsWarning bool DividedByZeroAsWarning bool @@ -188,7 +186,6 @@ type StatementContext struct { CacheType PlanCacheType BatchCheck bool InNullRejectCheck bool - AllowInvalidDate bool IgnoreNoPartition bool IgnoreExplainIDSuffix bool MultiSchemaInfo *model.MultiSchemaInfo @@ -428,7 +425,7 @@ type StatementContext struct { // NewStmtCtx creates a new statement context func NewStmtCtx() *StatementContext { sc := &StatementContext{} - sc.typeCtx = typectx.NewContext(typectx.StrictFlags, time.UTC, sc.AppendWarning) + sc.typeCtx = types.NewContext(types.DefaultStmtFlags, time.UTC, sc.AppendWarning) return sc } @@ -436,14 +433,14 @@ func NewStmtCtx() *StatementContext { func NewStmtCtxWithTimeZone(tz *time.Location) *StatementContext { intest.Assert(tz) sc := &StatementContext{} - sc.typeCtx = typectx.NewContext(typectx.StrictFlags, tz, sc.AppendWarning) + sc.typeCtx = types.NewContext(types.DefaultStmtFlags, tz, sc.AppendWarning) return sc } // Reset resets a statement context func (sc *StatementContext) Reset() { *sc = StatementContext{ - typeCtx: typectx.NewContext(typectx.StrictFlags, time.UTC, sc.AppendWarning), + typeCtx: types.NewContext(types.DefaultStmtFlags, time.UTC, sc.AppendWarning), } } @@ -459,17 +456,17 @@ func (sc *StatementContext) SetTimeZone(tz *time.Location) { } // TypeCtx returns the type context -func (sc *StatementContext) TypeCtx() typectx.Context { +func (sc *StatementContext) TypeCtx() types.Context { return sc.typeCtx } // TypeFlags returns the type flags -func (sc *StatementContext) TypeFlags() typectx.Flags { +func (sc *StatementContext) TypeFlags() types.Flags { return sc.typeCtx.Flags() } // SetTypeFlags sets the type flags -func (sc *StatementContext) SetTypeFlags(flags typectx.Flags) { +func (sc *StatementContext) SetTypeFlags(flags types.Flags) { sc.typeCtx = sc.typeCtx.WithFlags(flags) } @@ -1145,7 +1142,7 @@ func (sc *StatementContext) PushDownFlags() uint64 { if sc.OverflowAsWarning { flags |= model.FlagOverflowAsWarning } - if sc.IgnoreZeroInDate { + if sc.TypeFlags().IgnoreZeroInDate() { flags |= model.FlagIgnoreZeroInDate } if sc.DividedByZeroAsWarning { @@ -1210,14 +1207,13 @@ func (sc *StatementContext) InitFromPBFlagAndTz(flags uint64, tz *time.Location) sc.InSelectStmt = (flags & model.FlagInSelectStmt) > 0 sc.InDeleteStmt = (flags & model.FlagInUpdateOrDeleteStmt) > 0 sc.OverflowAsWarning = (flags & model.FlagOverflowAsWarning) > 0 - sc.IgnoreZeroInDate = (flags & model.FlagIgnoreZeroInDate) > 0 sc.DividedByZeroAsWarning = (flags & model.FlagDividedByZeroAsWarning) > 0 sc.SetTimeZone(tz) - sc.SetTypeFlags(typectx.StrictFlags. + sc.SetTypeFlags(types.DefaultStmtFlags. WithIgnoreTruncateErr((flags & model.FlagIgnoreTruncate) > 0). WithTruncateAsWarning((flags & model.FlagTruncateAsWarning) > 0). - WithClipNegativeToZero(sc.InInsertStmt), - ) + WithIgnoreZeroInDate((flags & model.FlagIgnoreZeroInDate) > 0).
+ WithAllowNegativeToUnsigned(!sc.InInsertStmt)) } // GetLockWaitStartTime returns the statement pessimistic lock wait start time @@ -1360,12 +1356,12 @@ func (sc *StatementContext) RecordedStatsLoadStatusCnt() (cnt int) { // If the statement context is nil, it'll return a newly created default type context. // **don't** use this function if you can make sure the `sc` is not nil. We should limit the usage of this function as // little as possible. -func (sc *StatementContext) TypeCtxOrDefault() typectx.Context { +func (sc *StatementContext) TypeCtxOrDefault() types.Context { if sc != nil { return sc.typeCtx } - return typectx.DefaultNoWarningContext + return types.DefaultStmtNoWarningContext } // UsedStatsInfoForTable records stats that are used during query and their information. diff --git a/pkg/sessionctx/stmtctx/stmtctx_test.go b/pkg/sessionctx/stmtctx/stmtctx_test.go index c1e03af0166d6..60b5bdad3ff84 100644 --- a/pkg/sessionctx/stmtctx/stmtctx_test.go +++ b/pkg/sessionctx/stmtctx/stmtctx_test.go @@ -30,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/pkg/types" - typectx "github.com/pingcap/tidb/pkg/types/context" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/util" @@ -97,7 +96,7 @@ func TestStatementContextPushDownFLags(t *testing.T) { {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) }), 1}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) }), 2}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.OverflowAsWarning = true }), 64}, - {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.IgnoreZeroInDate = true }), 128}, + {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) }), 128}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.DividedByZeroAsWarning = true }), 256}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InLoadDataStmt = true }), 1024}, {newStmtCtx(func(sc *stmtctx.StatementContext) { @@ -110,7 +109,7 @@ func TestStatementContextPushDownFLags(t *testing.T) { }), 257}, {newStmtCtx(func(sc *stmtctx.StatementContext) { sc.InUpdateStmt = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreZeroInDate(true)) sc.InLoadDataStmt = true }), 1168}, } @@ -316,7 +315,7 @@ func TestStmtHintsClone(t *testing.T) { func TestNewStmtCtx(t *testing.T) { sc := stmtctx.NewStmtCtx() - require.Equal(t, types.StrictFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.Same(t, time.UTC, sc.TimeZone()) require.Same(t, time.UTC, sc.TimeZone()) sc.AppendWarning(errors.New("err1")) @@ -327,7 +326,7 @@ func TestNewStmtCtx(t *testing.T) { tz := time.FixedZone("UTC+1", 2*60*60) sc = stmtctx.NewStmtCtxWithTimeZone(tz) - require.Equal(t, types.StrictFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.Same(t, tz, sc.TimeZone()) require.Same(t, tz, sc.TimeZone()) sc.AppendWarning(errors.New("err2")) @@ -347,37 +346,37 @@ func TestSetStmtCtxTimeZone(t *testing.T) { func TestSetStmtCtxTypeFlags(t *testing.T) { sc := stmtctx.NewStmtCtx() - require.Equal(t, types.StrictFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) - sc.SetTypeFlags(typectx.FlagClipNegativeToZero | typectx.FlagSkipASCIICheck) - require.Equal(t, 
typectx.FlagClipNegativeToZero|typectx.FlagSkipASCIICheck, sc.TypeFlags()) + sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck) + require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) - sc.SetTypeFlags(typectx.FlagSkipASCIICheck | typectx.FlagSkipUTF8Check | typectx.FlagInvalidDateAsWarning) - require.Equal(t, typectx.FlagSkipASCIICheck|typectx.FlagSkipUTF8Check|typectx.FlagInvalidDateAsWarning, sc.TypeFlags()) + sc.SetTypeFlags(types.FlagSkipASCIICheck | types.FlagSkipUTF8Check | types.FlagTruncateAsWarning) + require.Equal(t, types.FlagSkipASCIICheck|types.FlagSkipUTF8Check|types.FlagTruncateAsWarning, sc.TypeFlags()) require.Equal(t, sc.TypeFlags(), sc.TypeFlags()) } func TestResetStmtCtx(t *testing.T) { sc := stmtctx.NewStmtCtx() - require.Equal(t, types.StrictFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) tz := time.FixedZone("UTC+1", 2*60*60) sc.SetTimeZone(tz) - sc.SetTypeFlags(typectx.FlagClipNegativeToZero | typectx.FlagSkipASCIICheck) + sc.SetTypeFlags(types.FlagAllowNegativeToUnsigned | types.FlagSkipASCIICheck) sc.AppendWarning(errors.New("err1")) sc.InRestrictedSQL = true sc.StmtType = "Insert" require.Same(t, tz, sc.TimeZone()) - require.Equal(t, typectx.FlagClipNegativeToZero|typectx.FlagSkipASCIICheck, sc.TypeFlags()) + require.Equal(t, types.FlagAllowNegativeToUnsigned|types.FlagSkipASCIICheck, sc.TypeFlags()) require.Equal(t, 1, len(sc.GetWarnings())) sc.Reset() require.Same(t, time.UTC, sc.TimeZone()) require.Same(t, time.UTC, sc.TimeZone()) - require.Equal(t, types.StrictFlags, sc.TypeFlags()) - require.Equal(t, types.StrictFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) + require.Equal(t, types.DefaultStmtFlags, sc.TypeFlags()) require.False(t, sc.InRestrictedSQL) require.Empty(t, sc.StmtType) require.Equal(t, 0, len(sc.GetWarnings())) diff --git a/pkg/sessionctx/variable/BUILD.bazel b/pkg/sessionctx/variable/BUILD.bazel index cf4cca28db875..081592a5e5b08 100644 --- a/pkg/sessionctx/variable/BUILD.bazel +++ b/pkg/sessionctx/variable/BUILD.bazel @@ -57,6 +57,7 @@ go_library( "//pkg/util/replayer", "//pkg/util/rowcodec", "//pkg/util/size", + "//pkg/util/sqlkiller", "//pkg/util/stmtsummary/v2:stmtsummary", "//pkg/util/stringutil", "//pkg/util/tableutil", diff --git a/pkg/sessionctx/variable/session.go b/pkg/sessionctx/variable/session.go index 4f4ff2d2b5afa..23e7df757cbbd 100644 --- a/pkg/sessionctx/variable/session.go +++ b/pkg/sessionctx/variable/session.go @@ -56,6 +56,7 @@ import ( "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/replayer" "github.com/pingcap/tidb/pkg/util/rowcodec" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/pkg/util/stringutil" "github.com/pingcap/tidb/pkg/util/tableutil" "github.com/pingcap/tidb/pkg/util/tiflash" @@ -1080,8 +1081,8 @@ type SessionVars struct { // See https://github.com/pingcap/tidb/blob/7105505a78fc886c33258caa5813baf197b15247/docs/design/2023-06-30-configurable-kv-timeout.md?plain=1#L14-L15 TiKVClientReadTimeout uint64 - // Killed is a flag to indicate that this query is killed. - Killed uint32 + // SQLKiller is a flag to indicate that this query is killed. + SQLKiller sqlkiller.SQLKiller // ConnectionStatus indicates current connection status. 
ConnectionStatus int32 @@ -1434,6 +1435,9 @@ type SessionVars struct { // AnalyzePartitionMergeConcurrency indicates concurrency for merging partition stats AnalyzePartitionMergeConcurrency int + // EnableAsyncMergeGlobalStats indicates whether to enable async merge global stats + EnableAsyncMergeGlobalStats bool + // EnableExternalTSRead indicates whether to enable read through external ts EnableExternalTSRead bool @@ -2019,7 +2023,7 @@ func NewSessionVars(hctx HookContext) *SessionVars { ResourceGroupName: resourcegroup.DefaultResourceGroupName, DefaultCollationForUTF8MB4: mysql.DefaultCollationName, } - vars.KVVars = tikvstore.NewVariables(&vars.Killed) + vars.KVVars = tikvstore.NewVariables(&vars.SQLKiller.Signal) vars.Concurrency = Concurrency{ indexLookupConcurrency: DefIndexLookupConcurrency, indexSerialScanConcurrency: DefIndexSerialScanConcurrency, @@ -2062,6 +2066,7 @@ func NewSessionVars(hctx HookContext) *SessionVars { vars.DiskTracker = disk.NewTracker(memory.LabelForSession, -1) vars.MemTracker = memory.NewTracker(memory.LabelForSession, vars.MemQuotaQuery) vars.MemTracker.IsRootTrackerOfSess = true + vars.MemTracker.Killer = &vars.SQLKiller for _, engine := range config.GetGlobalConfig().IsolationRead.Engines { switch engine { diff --git a/pkg/sessionctx/variable/setvar_affect.go b/pkg/sessionctx/variable/setvar_affect.go index 1052665a3d6f0..4745c55121264 100644 --- a/pkg/sessionctx/variable/setvar_affect.go +++ b/pkg/sessionctx/variable/setvar_affect.go @@ -102,7 +102,7 @@ var isHintUpdatableVerified = map[string]struct{}{ "tidb_allow_fallback_to_tikv": {}, "tiflash_fastscan": {}, "tiflash_fine_grained_shuffle_batch_size": {}, - "tiflash_find_grained_shuffle_stream_count": {}, + "tiflash_fine_grained_shuffle_stream_count": {}, // Variables that are compatible with MySQL.
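The session kill signal is no longer a bare uint32: one sqlkiller.SQLKiller instance is now shared by the KV client (via &vars.SQLKiller.Signal) and the memory tracker (via MemTracker.Killer), so a single kill reaches every consumer. Long-running loops poll it; a minimal sketch of the consumer side, assuming only the HandleSignal behavior visible at the call sites in this patch (handlePartition is hypothetical):

```go
// HandleSignal returns a non-nil error once the query has been killed;
// callers are expected to stop work and propagate that error.
func mergeLoop(killer *sqlkiller.SQLKiller, parts []*statistics.TopN) error {
	for _, part := range parts {
		if err := killer.HandleSignal(); err != nil {
			return err // abort promptly instead of finishing the merge
		}
		handlePartition(part) // hypothetical per-partition work
	}
	return nil
}
```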
"cte_max_recursion_depth": {}, "sql_mode": {}, diff --git a/pkg/sessionctx/variable/sysvar.go b/pkg/sessionctx/variable/sysvar.go index dc65f26a12da6..fa174ba8a8420 100644 --- a/pkg/sessionctx/variable/sysvar.go +++ b/pkg/sessionctx/variable/sysvar.go @@ -2370,7 +2370,13 @@ var defaultSysVars = []*SysVar{ return nil }, }, - + { + Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableAsyncMergeGlobalStats, Value: BoolToOnOff(DefTiDBEnableAsyncMergeGlobalStats), Type: TypeBool, + SetSession: func(s *SessionVars, val string) error { + s.EnableAsyncMergeGlobalStats = TiDBOptOn(val) + return nil + }, + }, {Scope: ScopeGlobal | ScopeSession, Name: TiDBOptPrefixIndexSingleScan, Value: BoolToOnOff(DefTiDBOptPrefixIndexSingleScan), Type: TypeBool, SetSession: func(s *SessionVars, val string) error { s.OptPrefixIndexSingleScan = TiDBOptOn(val) return nil @@ -2397,6 +2403,12 @@ var defaultSysVars = []*SysVar{ s.EnableReuseCheck = TiDBOptOn(val) return nil }}, + {Scope: ScopeGlobal, Name: TiDBIgnoreInlistPlanDigest, Value: BoolToOnOff(DefTiDBIgnoreInlistPlanDigest), Type: TypeBool, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error { + IgnoreInlistPlanDigest.Store(TiDBOptOn(s)) + return nil + }, GetGlobal: func(ctx context.Context, vars *SessionVars) (string, error) { + return BoolToOnOff(IgnoreInlistPlanDigest.Load()), nil + }}, {Scope: ScopeGlobal, Name: TiDBTTLJobEnable, Value: BoolToOnOff(DefTiDBTTLJobEnable), Type: TypeBool, SetGlobal: func(ctx context.Context, vars *SessionVars, s string) error { EnableTTLJob.Store(TiDBOptOn(s)) return nil diff --git a/pkg/sessionctx/variable/sysvar_test.go b/pkg/sessionctx/variable/sysvar_test.go index baaf037dad7f5..e275aa7be2fde 100644 --- a/pkg/sessionctx/variable/sysvar_test.go +++ b/pkg/sessionctx/variable/sysvar_test.go @@ -1257,6 +1257,22 @@ func TestSetJobScheduleWindow(t *testing.T) { require.Equal(t, "16:11 +0800", val) } +func TestTiDBIgnoreInlistPlanDigest(t *testing.T) { + vars := NewSessionVars(nil) + mock := NewMockGlobalAccessor4Tests() + mock.SessionVars = vars + vars.GlobalVarsAccessor = mock + initValue, err := mock.GetGlobalSysVar(TiDBIgnoreInlistPlanDigest) + require.NoError(t, err) + require.Equal(t, initValue, Off) + // Set to On(init at start) + err1 := mock.SetGlobalSysVar(context.Background(), TiDBIgnoreInlistPlanDigest, On) + require.NoError(t, err1) + NewVal, err2 := mock.GetGlobalSysVar(TiDBIgnoreInlistPlanDigest) + require.NoError(t, err2) + require.Equal(t, NewVal, On) +} + func TestTiDBEnableResourceControl(t *testing.T) { // setup the hooks for test // NOTE: the default system variable is true but the switch is false diff --git a/pkg/sessionctx/variable/tidb_vars.go b/pkg/sessionctx/variable/tidb_vars.go index c7a50824b9045..5d4e827aec932 100644 --- a/pkg/sessionctx/variable/tidb_vars.go +++ b/pkg/sessionctx/variable/tidb_vars.go @@ -590,6 +590,9 @@ const ( // TiDBStmtSummaryMaxSQLLength indicates the max length of displayed normalized sql and sample sql. TiDBStmtSummaryMaxSQLLength = "tidb_stmt_summary_max_sql_length" + // TiDBIgnoreInlistPlanDigest enables TiDB to generate the same plan digest with SQL using different in-list arguments. + TiDBIgnoreInlistPlanDigest = "tidb_ignore_inlist_plan_digest" + // TiDBCapturePlanBaseline indicates whether the capture of plan baselines is enabled. 
TiDBCapturePlanBaseline = "tidb_capture_plan_baselines" @@ -840,7 +843,8 @@ const ( TiDBAnalyzePartitionConcurrency = "tidb_analyze_partition_concurrency" // TiDBMergePartitionStatsConcurrency indicates the concurrency when merging partition stats into global stats TiDBMergePartitionStatsConcurrency = "tidb_merge_partition_stats_concurrency" - + // TiDBEnableAsyncMergeGlobalStats indicates whether to enable async merge global stats + TiDBEnableAsyncMergeGlobalStats = "tidb_enable_async_merge_global_stats" // TiDBOptPrefixIndexSingleScan indicates whether to do some optimizations to avoid double scan for prefix index. // When set to true, `col is (not) null`(`col` is index prefix column) is regarded as index filter rather than table filter. TiDBOptPrefixIndexSingleScan = "tidb_opt_prefix_index_single_scan" @@ -889,7 +893,7 @@ const ( // TiDBOptOrderingIdxSelThresh is the threshold for optimizer to consider the ordering index. TiDBOptOrderingIdxSelThresh = "tidb_opt_ordering_index_selectivity_threshold" - // TiDBOptEnableMPPSharedCTEExecution indicates whehter the optimizer try to build shared CTE scan during MPP execution. + // TiDBOptEnableMPPSharedCTEExecution indicates whether the optimizer tries to build shared CTE scan during MPP execution. TiDBOptEnableMPPSharedCTEExecution = "tidb_opt_enable_mpp_shared_cte_execution" // TiDBOptFixControl makes the user able to control some details of the optimizer behavior. TiDBOptFixControl = "tidb_opt_fix_control" @@ -1281,6 +1285,7 @@ const ( DefTiDBStmtSummaryMaxStmtCount = 3000 DefTiDBStmtSummaryMaxSQLLength = 4096 DefTiDBCapturePlanBaseline = Off + DefTiDBIgnoreInlistPlanDigest = false DefTiDBEnableIndexMerge = true DefEnableLegacyInstanceScope = true DefTiDBTableCacheLease = 3 // 3s @@ -1364,6 +1369,7 @@ const ( DefTiDBGOGCMaxValue = 500 DefTiDBGOGCMinValue = 100 DefTiDBOptPrefixIndexSingleScan = true + DefTiDBEnableAsyncMergeGlobalStats = true DefTiDBExternalTS = 0 DefTiDBEnableExternalTSRead = false DefTiDBEnableReusechunk = true @@ -1521,6 +1527,7 @@ var ( ServiceScope = atomic.NewString("") SchemaVersionCacheLimit = atomic.NewInt64(DefTiDBSchemaVersionCacheLimit) CloudStorageURI = atomic.NewString("") + IgnoreInlistPlanDigest = atomic.NewBool(DefTiDBIgnoreInlistPlanDigest) ) var ( diff --git a/pkg/sessionctx/variable/varsutil.go b/pkg/sessionctx/variable/varsutil.go index 37abdc355f08d..765367a999526 100644 --- a/pkg/sessionctx/variable/varsutil.go +++ b/pkg/sessionctx/variable/varsutil.go @@ -441,7 +441,7 @@ func parseTSFromNumberOrTime(s *SessionVars, sVal string) (uint64, error) { return tso, nil } - t, err := types.ParseTime(s.StmtCtx, sVal, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(s.StmtCtx.TypeCtx(), sVal, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return 0, err } @@ -456,7 +456,7 @@ func setTxnReadTS(s *SessionVars, sVal string) error { return nil } - t, err := types.ParseTime(s.StmtCtx, sVal, mysql.TypeTimestamp, types.MaxFsp, nil) + t, err := types.ParseTime(s.StmtCtx.TypeCtx(), sVal, mysql.TypeTimestamp, types.MaxFsp, nil) if err != nil { return err } diff --git a/pkg/sessiontxn/staleread/util.go b/pkg/sessiontxn/staleread/util.go index fde24b7b7c8ed..0f578694b5b80 100644 --- a/pkg/sessiontxn/staleread/util.go +++ b/pkg/sessiontxn/staleread/util.go @@ -52,7 +52,7 @@ func CalculateAsOfTsExpr(ctx context.Context, sctx sessionctx.Context, tsExpr as toTypeTimestamp := types.NewFieldType(mysql.TypeTimestamp) // We need at least the millisecond here, so set fsp to 3.
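parseTSFromNumberOrTime and setTxnReadTS above now hand types.ParseTime a types.Context instead of the whole StatementContext, matching the stmtctx refactor earlier in this patch. A standalone context can be built the same way NewStmtCtx now does; a small sketch (the warning callback here is illustrative):

```go
// Build a types.Context directly, as NewStmtCtx does after this patch,
// and pass it to parsing APIs that used to take *StatementContext.
tc := types.NewContext(types.DefaultStmtFlags, time.UTC, func(err error) {
	fmt.Println("type warning:", err) // illustrative warning sink
})
t, err := types.ParseDatetime(tc, "2023-11-08 15:04:05.999")
```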
toTypeTimestamp.SetDecimal(3) - tsTimestamp, err := tsVal.ConvertTo(sctx.GetSessionVars().StmtCtx, toTypeTimestamp) + tsTimestamp, err := tsVal.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), toTypeTimestamp) if err != nil { return 0, err } diff --git a/pkg/statistics/BUILD.bazel b/pkg/statistics/BUILD.bazel index 217de5f6f42c1..69cb3ce87b2d9 100644 --- a/pkg/statistics/BUILD.bazel +++ b/pkg/statistics/BUILD.bazel @@ -74,7 +74,7 @@ go_test( data = glob(["testdata/**"]), embed = [":statistics"], flaky = True, - shard_count = 34, + shard_count = 33, deps = [ "//pkg/config", "//pkg/parser/ast", diff --git a/pkg/statistics/builder.go b/pkg/statistics/builder.go index 6812015018ef7..b7dd64a20a4ad 100644 --- a/pkg/statistics/builder.go +++ b/pkg/statistics/builder.go @@ -24,7 +24,9 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/codec" "github.com/pingcap/tidb/pkg/util/collate" + "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" + "go.uber.org/zap" ) // SortedBuilder is used to build histograms for PK and index. @@ -69,7 +71,7 @@ func (b *SortedBuilder) Iterate(data types.Datum) error { b.hist.NDV = 1 return nil } - cmp, err := b.hist.GetUpper(int(b.bucketIdx)).Compare(b.sc, &data, collate.GetBinaryCollator()) + cmp, err := b.hist.GetUpper(int(b.bucketIdx)).Compare(b.sc.TypeCtx(), &data, collate.GetBinaryCollator()) if err != nil { return errors.Trace(err) } @@ -176,7 +178,7 @@ func buildHist(sc *stmtctx.StatementContext, hg *Histogram, samples []*SampleIte memTracker.BufferedConsume(&bufferedMemSize, deltaSize) memTracker.BufferedRelease(&bufferedReleaseSize, deltaSize) } - cmp, err := upper.Compare(sc, &samples[i].Value, collate.GetBinaryCollator()) + cmp, err := upper.Compare(sc.TypeCtx(), &samples[i].Value, collate.GetBinaryCollator()) if err != nil { return 0, errors.Trace(err) } @@ -373,12 +375,46 @@ func BuildHistAndTopN( if err != nil { return nil, nil, errors.Trace(err) } + // For debugging invalid sample data. + var ( + foundTwice bool + firstTimeSample types.Datum + ) for j := 0; j < len(topNList); j++ { if bytes.Equal(sampleBytes, topNList[j].Encoded) { - // find the same value in topn: need to skip over this value in samples + // This should never happen, but we met this panic before, so we add this check here. + // See: https://github.com/pingcap/tidb/issues/35948 + if foundTwice { + datumString, err := firstTimeSample.ToString() + if err != nil { + logutil.BgLogger().With( + zap.String("category", "stats"), + ).Error("try to convert datum to string failed", zap.Error(err)) + } + + logutil.BgLogger().With( + zap.String("category", "stats"), + ).Warn( + "invalid sample data", + zap.Bool("isColumn", isColumn), + zap.Int64("columnID", id), + zap.String("datum", datumString), + zap.Binary("sampleBytes", sampleBytes), + zap.Binary("topNBytes", topNList[j].Encoded), + ) + // NOTE: if we don't break here, we may hit a panic in the following code. + // The index i may decrease to a negative value. + // We haven't fixed the issue here, because we don't know how to + // remove the invalid sample data from the samples. + break + } + // First time to find the same value in topN: need to record the sample data for debugging. + firstTimeSample = samples[i].Value + // Found the same value in topn: need to skip over this value in samples.
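A related micro-optimization lands in the cmsketch.go hunk below: FindTopN already bailed out when the probe key sorted after the last (largest) TopN entry, and it now also bails out when the key sorts before the first entry, so clearly out-of-range keys never pay for the binary search. The shape of the guard, with entries kept sorted by their encoded bytes:

```go
// Sketch of the bounds check around the O(log n) lookup; `topn` is
// sorted ascending by Encoded, as in statistics.TopN.
if bytes.Compare(topn[len(topn)-1].Encoded, d) < 0 || bytes.Compare(topn[0].Encoded, d) > 0 {
	return -1 // d sorts outside [first, last], so it cannot be present
}
```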
copy(samples[i:], samples[uint64(i)+topNList[j].Count:]) samples = samples[:uint64(len(samples))-topNList[j].Count] i-- + foundTwice = true continue } } diff --git a/pkg/statistics/cmsketch.go b/pkg/statistics/cmsketch.go index 3f246d76bd70c..9643e6b7188b2 100644 --- a/pkg/statistics/cmsketch.go +++ b/pkg/statistics/cmsketch.go @@ -23,8 +23,6 @@ import ( "slices" "sort" "strings" - "sync/atomic" - "time" "github.com/pingcap/errors" "github.com/pingcap/failpoint" @@ -655,6 +653,9 @@ func (c *TopN) FindTopN(d []byte) int { if bytes.Compare(c.TopN[len(c.TopN)-1].Encoded, d) < 0 { return -1 } + if bytes.Compare(c.TopN[0].Encoded, d) > 0 { + return -1 + } idx, match := slices.BinarySearchFunc(c.TopN, d, func(a TopNMeta, b []byte) int { return bytes.Compare(a.Encoded, b) }) @@ -795,84 +796,6 @@ func NewTopN(n int) *TopN { return &TopN{TopN: make([]TopNMeta, 0, n)} } -// MergePartTopN2GlobalTopN is used to merge the partition-level topN to global-level topN. -// The input parameters: -// 1. `topNs` are the partition-level topNs to be merged. -// 2. `n` is the size of the global-level topN. Notice: This value can be 0 and has no default value, we must explicitly specify this value. -// 3. `hists` are the partition-level histograms. Some values not in topN may be placed in the histogram. We need it here to make the value in the global-level TopN more accurate. -// -// The output parameters: -// 1. `*TopN` is the final global-level topN. -// 2. `[]TopNMeta` is the left topN value from the partition-level TopNs, but is not placed to global-level TopN. We should put them back to histogram latter. -// 3. `[]*Histogram` are the partition-level histograms which just delete some values when we merge the global-level topN. -func MergePartTopN2GlobalTopN(loc *time.Location, version int, topNs []*TopN, n uint32, hists []*Histogram, - isIndex bool, killed *uint32) (*TopN, []TopNMeta, []*Histogram, error) { - if CheckEmptyTopNs(topNs) { - return nil, nil, hists, nil - } - partNum := len(topNs) - // Different TopN structures may hold the same value, we have to merge them. - counter := make(map[hack.MutableString]float64) - // datumMap is used to store the mapping from the string type to datum type. - // The datum is used to find the value in the histogram. - datumMap := NewDatumMapCache() - for i, topN := range topNs { - if atomic.LoadUint32(killed) == 1 { - return nil, nil, nil, errors.Trace(ErrQueryInterrupted) - } - if topN.TotalCount() == 0 { - continue - } - for _, val := range topN.TopN { - encodedVal := hack.String(val.Encoded) - _, exists := counter[encodedVal] - counter[encodedVal] += float64(val.Count) - if exists { - // We have already calculated the encodedVal from the histogram, so just continue to next topN value. - continue - } - // We need to check whether the value corresponding to encodedVal is contained in other partition-level stats. - // 1. Check the topN first. - // 2. If the topN doesn't contain the value corresponding to encodedVal. We should check the histogram. 
- for j := 0; j < partNum; j++ { - if atomic.LoadUint32(killed) == 1 { - return nil, nil, nil, errors.Trace(ErrQueryInterrupted) - } - if (j == i && version >= 2) || topNs[j].FindTopN(val.Encoded) != -1 { - continue - } - // Get the encodedVal from the hists[j] - datum, exists := datumMap.Get(encodedVal) - if !exists { - d, err := datumMap.Put(val, encodedVal, hists[0].Tp.GetType(), isIndex, loc) - if err != nil { - return nil, nil, nil, err - } - datum = d - } - // Get the row count which the value is equal to the encodedVal from histogram. - count, _ := hists[j].EqualRowCount(nil, datum, isIndex) - if count != 0 { - counter[encodedVal] += count - // Remove the value corresponding to encodedVal from the histogram. - hists[j].BinarySearchRemoveVal(TopNMeta{Encoded: datum.GetBytes(), Count: uint64(count)}) - } - } - } - } - numTop := len(counter) - if numTop == 0 { - return nil, nil, hists, nil - } - sorted := make([]TopNMeta, 0, numTop) - for value, cnt := range counter { - data := hack.Slice(string(value)) - sorted = append(sorted, TopNMeta{Encoded: data, Count: uint64(cnt)}) - } - globalTopN, leftTopN := GetMergedTopNFromSortedSlice(sorted, n) - return globalTopN, leftTopN, hists, nil -} - // MergeTopN is used to merge more TopN structures to generate a new TopN struct by the given size. // The input parameters are multiple TopN structures to be merged and the size of the new TopN that will be generated. // The output parameters are the newly generated TopN structure and the remaining numbers. @@ -905,14 +828,12 @@ func MergeTopN(topNs []*TopN, n uint32) (*TopN, []TopNMeta) { // CheckEmptyTopNs checks whether all TopNs are empty. func CheckEmptyTopNs(topNs []*TopN) bool { - count := uint64(0) for _, topN := range topNs { - count += topN.TotalCount() - if count != 0 { + if topN.TotalCount() != 0 { return false } } - return count == 0 + return true } // SortTopnMeta sort topnMeta diff --git a/pkg/statistics/cmsketch_test.go b/pkg/statistics/cmsketch_test.go index 9e221645f221e..7cbdfc62450d3 100644 --- a/pkg/statistics/cmsketch_test.go +++ b/pkg/statistics/cmsketch_test.go @@ -19,11 +19,9 @@ import ( "math" "math/rand" "testing" - "time" "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" @@ -256,39 +254,6 @@ func TestCMSketchCodingTopN(t *testing.T) { require.NoError(t, err) } -func TestMergePartTopN2GlobalTopNWithoutHists(t *testing.T) { - loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) - version := 1 - isKilled := uint32(0) - - // Prepare TopNs. - topNs := make([]*TopN, 0, 10) - for i := 0; i < 10; i++ { - // Construct TopN, should be key(1, 1) -> 2, key(1, 2) -> 2, key(1, 3) -> 3. - topN := NewTopN(3) - { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(1)) - require.NoError(t, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(2)) - require.NoError(t, err) - topN.AppendTopN(key2, 2) - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(3)) - require.NoError(t, err) - topN.AppendTopN(key3, 3) - } - topNs = append(topNs, topN) - } - - // Test merge 2 topN with nil hists. 
- globalTopN, leftTopN, _, err := MergePartTopN2GlobalTopN(loc, version, topNs, 2, nil, false, &isKilled) - require.NoError(t, err) - require.Len(t, globalTopN.TopN, 2, "should only have 2 topN") - require.Equal(t, uint64(50), globalTopN.TotalCount(), "should have 50 rows") - require.Len(t, leftTopN, 1, "should have 1 left topN") -} - func TestSortTopnMeta(t *testing.T) { data := []TopNMeta{{ Encoded: []byte("a"), @@ -300,54 +265,3 @@ func TestSortTopnMeta(t *testing.T) { SortTopnMeta(data) require.Equal(t, uint64(2), data[0].Count) } - -func TestMergePartTopN2GlobalTopNWithHists(t *testing.T) { - loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) - version := 1 - isKilled := uint32(0) - - // Prepare TopNs. - topNs := make([]*TopN, 0, 10) - for i := 0; i < 10; i++ { - // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. - topN := NewTopN(3) - { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) - require.NoError(t, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) - require.NoError(t, err) - topN.AppendTopN(key2, 2) - if i%2 == 0 { - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) - require.NoError(t, err) - topN.AppendTopN(key3, 3) - } - } - topNs = append(topNs, topN) - } - - // Prepare Hists. - hists := make([]*Histogram, 0, 10) - for i := 0; i < 10; i++ { - // Construct Hist - h := NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) - h.Bounds.AppendInt64(0, 1) - h.Buckets = append(h.Buckets, Bucket{Repeat: 10, Count: 20}) - h.Bounds.AppendInt64(0, 2) - h.Buckets = append(h.Buckets, Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 3) - h.Buckets = append(h.Buckets, Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 4) - h.Buckets = append(h.Buckets, Bucket{Repeat: 10, Count: 40}) - hists = append(hists, h) - } - - // Test merge 2 topN. - globalTopN, leftTopN, _, err := MergePartTopN2GlobalTopN(loc, version, topNs, 2, hists, false, &isKilled) - require.NoError(t, err) - require.Len(t, globalTopN.TopN, 2, "should only have 2 topN") - require.Equal(t, uint64(55), globalTopN.TotalCount(), "should have 55") - require.Len(t, leftTopN, 1, "should have 1 left topN") -} diff --git a/pkg/statistics/handle/autoanalyze/BUILD.bazel b/pkg/statistics/handle/autoanalyze/BUILD.bazel index 3952d803d4b5d..c27b9ca8fd85e 100644 --- a/pkg/statistics/handle/autoanalyze/BUILD.bazel +++ b/pkg/statistics/handle/autoanalyze/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "//pkg/util", "//pkg/util/chunk", "//pkg/util/logutil", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "//pkg/util/timeutil", "@com_github_pingcap_errors//:errors", diff --git a/pkg/statistics/handle/autoanalyze/autoanalyze.go b/pkg/statistics/handle/autoanalyze/autoanalyze.go index 8b1d8dea852df..0592ba2c19ff8 100644 --- a/pkg/statistics/handle/autoanalyze/autoanalyze.go +++ b/pkg/statistics/handle/autoanalyze/autoanalyze.go @@ -36,6 +36,7 @@ import ( "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "github.com/pingcap/tidb/pkg/util/timeutil" "go.uber.org/zap" @@ -247,7 +248,7 @@ func autoAnalyzeTable(sctx sessionctx.Context, return false } if needAnalyze, reason := NeedAnalyzeTable(statsTbl, 20*statsHandle.Lease(), ratio); needAnalyze { - escaped, err := sqlexec.EscapeSQL(sql, params...) + escaped, err := sqlescape.EscapeSQL(sql, params...) 
if err != nil { return false } @@ -261,7 +262,7 @@ func autoAnalyzeTable(sctx sessionctx.Context, if _, ok := statsTbl.Indices[idx.ID]; !ok && idx.State == model.StatePublic { sqlWithIdx := sql + " index %n" paramsWithIdx := append(params, idx.Name.O) - escaped, err := sqlexec.EscapeSQL(sqlWithIdx, paramsWithIdx...) + escaped, err := sqlescape.EscapeSQL(sqlWithIdx, paramsWithIdx...) if err != nil { return false } @@ -428,7 +429,7 @@ func execAutoAnalyze(sctx sessionctx.Context, dur := time.Since(startTime) metrics.AutoAnalyzeHistogram.Observe(dur.Seconds()) if err != nil { - escaped, err1 := sqlexec.EscapeSQL(sql, params...) + escaped, err1 := sqlescape.EscapeSQL(sql, params...) if err1 != nil { escaped = "" } diff --git a/pkg/statistics/handle/bootstrap.go b/pkg/statistics/handle/bootstrap.go index 1e49bec642bef..83dd83d1a209a 100644 --- a/pkg/statistics/handle/bootstrap.go +++ b/pkg/statistics/handle/bootstrap.go @@ -405,17 +405,16 @@ func (*Handle) initStatsBuckets4Chunk(cache util.StatsCache, iter *chunk.Iterato // Setting TimeZone to time.UTC aligns with HistogramFromStorage and can fix #41938. However, #41985 still exist. // TODO: do the correct time zone conversion for timestamp-type columns' upper/lower bounds. sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)) var err error - lower, err = d.ConvertTo(sc, &column.Info.FieldType) + lower, err = d.ConvertTo(sc.TypeCtx(), &column.Info.FieldType) if err != nil { logutil.BgLogger().Debug("decode bucket lower bound failed", zap.Error(err)) delete(table.Columns, histID) continue } d = types.NewBytesDatum(row.GetBytes(6)) - upper, err = d.ConvertTo(sc, &column.Info.FieldType) + upper, err = d.ConvertTo(sc.TypeCtx(), &column.Info.FieldType) if err != nil { logutil.BgLogger().Debug("decode bucket upper bound failed", zap.Error(err)) delete(table.Columns, histID) diff --git a/pkg/statistics/handle/globalstats/BUILD.bazel b/pkg/statistics/handle/globalstats/BUILD.bazel index 8cbfd4a96040f..efd55906af12b 100644 --- a/pkg/statistics/handle/globalstats/BUILD.bazel +++ b/pkg/statistics/handle/globalstats/BUILD.bazel @@ -23,6 +23,7 @@ go_library( "//pkg/types", "//pkg/util/hack", "//pkg/util/logutil", + "//pkg/util/sqlkiller", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", "@com_github_tiancaiamao_gp//:gp", @@ -38,11 +39,12 @@ go_test( "globalstats_test.go", "main_test.go", "topn_bench_test.go", + "topn_test.go", ], + embed = [":globalstats"], flaky = True, - shard_count = 18, + shard_count = 21, deps = [ - ":globalstats", "//pkg/config", "//pkg/parser/model", "//pkg/parser/mysql", @@ -53,6 +55,7 @@ go_test( "//pkg/types", "//pkg/util/chunk", "//pkg/util/codec", + "//pkg/util/sqlkiller", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", "@com_github_tiancaiamao_gp//:gp", diff --git a/pkg/statistics/handle/globalstats/global_stats.go b/pkg/statistics/handle/globalstats/global_stats.go index 6fc7e94ee8487..d5abb16a03ee1 100644 --- a/pkg/statistics/handle/globalstats/global_stats.go +++ b/pkg/statistics/handle/globalstats/global_stats.go @@ -15,6 +15,8 @@ package globalstats import ( + "fmt" + "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/infoschema" "github.com/pingcap/tidb/pkg/parser/ast" @@ -23,7 +25,9 @@ import ( "github.com/pingcap/tidb/pkg/sessiontxn" "github.com/pingcap/tidb/pkg/statistics" 
"github.com/pingcap/tidb/pkg/statistics/handle/util" + "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/tiancaiamao/gp" "go.uber.org/zap" ) @@ -99,15 +103,18 @@ func MergePartitionStats2GlobalStats( isIndex bool, histIDs []int64, ) (globalStats *GlobalStats, err error) { - worker, err := NewAsyncMergePartitionStats2GlobalStats(statsHandle, globalTableInfo, histIDs, is) - if err != nil { - return nil, errors.Trace(err) - } - err = worker.MergePartitionStats2GlobalStats(sc, opts, isIndex) - if err != nil { - return nil, errors.Trace(err) + if sc.GetSessionVars().EnableAsyncMergeGlobalStats { + worker, err := NewAsyncMergePartitionStats2GlobalStats(statsHandle, globalTableInfo, histIDs, is) + if err != nil { + return nil, errors.Trace(err) + } + err = worker.MergePartitionStats2GlobalStats(sc, opts, isIndex) + if err != nil { + return nil, errors.Trace(err) + } + return worker.Result(), nil } - return worker.Result(), nil + return blockingMergePartitionStats2GlobalStats(sc, statsHandle.GPool(), opts, is, globalTableInfo, isIndex, histIDs, nil, statsHandle) } // MergePartitionStats2GlobalStatsByTableID merge the partition-level stats to global-level stats based on the tableID. @@ -263,3 +270,196 @@ func UpdateGlobalStats( } return nil } + +// blockingMergePartitionStats2GlobalStats merge the partition-level stats to global-level stats based on the tableInfo. +// It is the old algorithm to merge partition-level stats to global-level stats. It will happen the OOM. because it will load all the partition-level stats into memory. +func blockingMergePartitionStats2GlobalStats( + sc sessionctx.Context, + gpool *gp.Pool, + opts map[ast.AnalyzeOptionType]uint64, + is infoschema.InfoSchema, + globalTableInfo *model.TableInfo, + isIndex bool, + histIDs []int64, + allPartitionStats map[int64]*statistics.Table, + statsHandle util.StatsHandle, +) (globalStats *GlobalStats, err error) { + externalCache := false + if allPartitionStats != nil { + externalCache = true + } + + partitionNum := len(globalTableInfo.Partition.Definitions) + if len(histIDs) == 0 { + for _, col := range globalTableInfo.Columns { + // The virtual generated column stats can not be merged to the global stats. + if col.IsVirtualGenerated() { + continue + } + histIDs = append(histIDs, col.ID) + } + } + + // Initialized the globalStats. + globalStats = newGlobalStats(len(histIDs)) + + // Slice Dimensions Explanation + // First dimension: Column or Index Stats + // Second dimension: Partition Tables + // Because all topN and histograms need to be collected before they can be merged. + // So we should store all the partition-level stats first, and merge them together. 
+ allHg := make([][]*statistics.Histogram, globalStats.Num) + allCms := make([][]*statistics.CMSketch, globalStats.Num) + allTopN := make([][]*statistics.TopN, globalStats.Num) + allFms := make([][]*statistics.FMSketch, globalStats.Num) + for i := 0; i < globalStats.Num; i++ { + allHg[i] = make([]*statistics.Histogram, 0, partitionNum) + allCms[i] = make([]*statistics.CMSketch, 0, partitionNum) + allTopN[i] = make([]*statistics.TopN, 0, partitionNum) + allFms[i] = make([]*statistics.FMSketch, 0, partitionNum) + } + + skipMissingPartitionStats := sc.GetSessionVars().SkipMissingPartitionStats + for _, def := range globalTableInfo.Partition.Definitions { + partitionID := def.ID + partitionTable, ok := statsHandle.TableInfoByID(is, partitionID) + if !ok { + err = errors.Errorf("unknown physical ID %d in stats meta table, maybe it has been dropped", partitionID) + return + } + tableInfo := partitionTable.Meta() + var partitionStats *statistics.Table + var okLoad bool + if allPartitionStats != nil { + partitionStats, okLoad = allPartitionStats[partitionID] + } else { + okLoad = false + } + // If pre-load partition stats isn't provided, then we load partition stats directly and set it into allPartitionStats + if !okLoad { + var err1 error + partitionStats, err1 = statsHandle.LoadTablePartitionStats(tableInfo, &def) + if err1 != nil { + if skipMissingPartitionStats && types.ErrPartitionStatsMissing.Equal(err1) { + globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, fmt.Sprintf("partition `%s`", def.Name.L)) + continue + } + err = err1 + return + } + if externalCache { + allPartitionStats[partitionID] = partitionStats + } + } + + for i := 0; i < globalStats.Num; i++ { + // GetStatsInfo will return the copy of the statsInfo, so we don't need to worry about the data race. + // partitionStats will be released after the for loop. + hg, cms, topN, fms, analyzed := partitionStats.GetStatsInfo(histIDs[i], isIndex, externalCache) + skipPartition := false + if !analyzed { + var missingPart string + if !isIndex { + missingPart = fmt.Sprintf("partition `%s` column `%s`", def.Name.L, tableInfo.FindColumnNameByID(histIDs[i])) + } else { + missingPart = fmt.Sprintf("partition `%s` index `%s`", def.Name.L, tableInfo.FindIndexNameByID(histIDs[i])) + } + if !skipMissingPartitionStats { + err = types.ErrPartitionStatsMissing.GenWithStackByArgs(fmt.Sprintf("table `%s` %s", tableInfo.Name.L, missingPart)) + return + } + globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, missingPart) + skipPartition = true + } + + // Partition stats is not empty but column stats(hist, topN) is missing. + if partitionStats.RealtimeCount > 0 && (hg == nil || hg.TotalRowCount() <= 0) && (topN == nil || topN.TotalCount() <= 0) { + var missingPart string + if !isIndex { + missingPart = fmt.Sprintf("partition `%s` column `%s`", def.Name.L, tableInfo.FindColumnNameByID(histIDs[i])) + } else { + missingPart = fmt.Sprintf("partition `%s` index `%s`", def.Name.L, tableInfo.FindIndexNameByID(histIDs[i])) + } + if !skipMissingPartitionStats { + err = types.ErrPartitionColumnStatsMissing.GenWithStackByArgs(fmt.Sprintf("table `%s` %s", tableInfo.Name.L, missingPart)) + return + } + globalStats.MissingPartitionStats = append(globalStats.MissingPartitionStats, missingPart+" hist and topN") + skipPartition = true + } + + if i == 0 { + // In a partition, we will only update globalStats.Count once. 
+ globalStats.Count += partitionStats.RealtimeCount + globalStats.ModifyCount += partitionStats.ModifyCount + } + + if !skipPartition { + allHg[i] = append(allHg[i], hg) + allCms[i] = append(allCms[i], cms) + allTopN[i] = append(allTopN[i], topN) + allFms[i] = append(allFms[i], fms) + } + } + } + + // After collecting all the statistics from the partition-level stats, + // we should merge them together. + for i := 0; i < globalStats.Num; i++ { + if len(allHg[i]) == 0 { + // If all partitions have no stats, we skip merging global stats because it may not handle the case `len(allHg[i]) == 0` + // correctly. This avoids unexpected behaviors such as a nil pointer panic. + continue + } + // FMSketch uses a lot of memory, so we first deal with it and then destroy it. + // Merge FMSketch. + globalStats.Fms[i] = allFms[i][0] + for j := 1; j < len(allFms[i]); j++ { + globalStats.Fms[i].MergeFMSketch(allFms[i][j]) + allFms[i][j].DestroyAndPutToPool() + } + + // Update the global NDV. + globalStatsNDV := globalStats.Fms[i].NDV() + if globalStatsNDV > globalStats.Count { + globalStatsNDV = globalStats.Count + } + globalStats.Fms[i].DestroyAndPutToPool() + + // Merge CMSketch. + globalStats.Cms[i] = allCms[i][0] + for j := 1; j < len(allCms[i]); j++ { + err = globalStats.Cms[i].MergeCMSketch(allCms[i][j]) + if err != nil { + return + } + } + + // Merge topN. + // Note: We need to merge the TopN before merging the histogram, + // because after merging the TopN, some values will be left over. + // These remaining topN values will be used as a separate bucket for later histogram merging. + var poppedTopN []statistics.TopNMeta + wrapper := NewStatsWrapper(allHg[i], allTopN[i]) + globalStats.TopN[i], poppedTopN, allHg[i], err = mergeGlobalStatsTopN(gpool, sc, wrapper, + sc.GetSessionVars().StmtCtx.TimeZone(), sc.GetSessionVars().AnalyzeVersion, uint32(opts[ast.AnalyzeOptNumTopN]), isIndex) + if err != nil { + return + } + + // Merge histogram. + globalStats.Hg[i], err = statistics.MergePartitionHist2GlobalHist(sc.GetSessionVars().StmtCtx, allHg[i], poppedTopN, + int64(opts[ast.AnalyzeOptNumBuckets]), isIndex) + if err != nil { + return + } + + // NOTICE: after merging, bucket NDVs tend to be underestimated, so to be safe we don't use them.
+ for j := range globalStats.Hg[i].Buckets { + globalStats.Hg[i].Buckets[j].NDV = 0 + } + + globalStats.Hg[i].NDV = globalStatsNDV + } + return +} diff --git a/pkg/statistics/handle/globalstats/globalstats_test.go b/pkg/statistics/handle/globalstats/globalstats_test.go index 84a534517dd0a..69b5c3d81445b 100644 --- a/pkg/statistics/handle/globalstats/globalstats_test.go +++ b/pkg/statistics/handle/globalstats/globalstats_test.go @@ -26,11 +26,24 @@ import ( "github.com/stretchr/testify/require" ) -func TestShowGlobalStats(t *testing.T) { +func TestShowGlobalStatsWithAsyncMergeGlobal(t *testing.T) { + testShowGlobalStats(t, true) +} + +func TestShowGlobalStatsWithoutAsyncMergeGlobal(t *testing.T) { + testShowGlobalStats(t, false) +} + +func testShowGlobalStats(t *testing.T, isAsync bool) { store := testkit.CreateMockStore(t) tk := testkit.NewTestKit(t, store) tk.MustExec("use test") tk.MustExec("set @@session.tidb_analyze_version = 0") + if isAsync { + tk.MustExec("set @@global.tidb_enable_async_merge_global_stats = 0") + } else { + tk.MustExec("set @@global.tidb_enable_async_merge_global_stats = 1") + } tk.MustExec("drop table if exists t") tk.MustExec("set @@tidb_partition_prune_mode = 'static'") tk.MustExec("create table t (a int, key(a)) partition by hash(a) partitions 2") diff --git a/pkg/statistics/handle/globalstats/merge_worker.go b/pkg/statistics/handle/globalstats/merge_worker.go index 74600eb7acffb..7813e6c90c289 100644 --- a/pkg/statistics/handle/globalstats/merge_worker.go +++ b/pkg/statistics/handle/globalstats/merge_worker.go @@ -16,12 +16,11 @@ package globalstats import ( "sync" - "sync/atomic" "time" - "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/statistics" "github.com/pingcap/tidb/pkg/util/hack" + "github.com/pingcap/tidb/pkg/util/sqlkiller" ) // StatsWrapper wrapper stats @@ -39,7 +38,7 @@ func NewStatsWrapper(hg []*statistics.Histogram, topN []*statistics.TopN) *Stats } type topnStatsMergeWorker struct { - killed *uint32 + killer *sqlkiller.SQLKiller taskCh <-chan *TopnStatsMergeTask respCh chan<- *TopnStatsMergeResponse // the stats in the wrapper should only be read during the worker @@ -53,14 +52,14 @@ func NewTopnStatsMergeWorker( taskCh <-chan *TopnStatsMergeTask, respCh chan<- *TopnStatsMergeResponse, wrapper *StatsWrapper, - killed *uint32) *topnStatsMergeWorker { + killer *sqlkiller.SQLKiller) *topnStatsMergeWorker { worker := &topnStatsMergeWorker{ taskCh: taskCh, respCh: respCh, } worker.statsWrapper = wrapper worker.shardMutex = make([]sync.Mutex, len(wrapper.AllHg)) - worker.killed = killed + worker.killer = killer return worker } @@ -108,8 +107,8 @@ func (worker *topnStatsMergeWorker) Run(timeZone *time.Location, isIndex bool, datumMap := statistics.NewDatumMapCache() for i, topN := range checkTopNs { - if atomic.LoadUint32(worker.killed) == 1 { - resp.Err = errors.Trace(statistics.ErrQueryInterrupted) + if err := worker.killer.HandleSignal(); err != nil { + resp.Err = err worker.respCh <- resp return } @@ -128,8 +127,8 @@ func (worker *topnStatsMergeWorker) Run(timeZone *time.Location, isIndex bool, // 1. Check the topN first. // 2. If the topN doesn't contain the value corresponding to encodedVal. We should check the histogram. 
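The blocking fallback above is reachable through the new session variable: since DefTiDBEnableAsyncMergeGlobalStats is true, the async worker is the default and the old path is opt-in. Toggling it in testkit style, consistent with the sysvar definition in this patch (ON/1 selects the async worker, OFF/0 the blocking merge):

```go
// Per the SetSession hook above, the value feeds
// SessionVars.EnableAsyncMergeGlobalStats, which
// MergePartitionStats2GlobalStats consults to pick a path.
tk.MustExec("set @@global.tidb_enable_async_merge_global_stats = 0") // blocking path
tk.MustExec("set @@global.tidb_enable_async_merge_global_stats = 1") // async path (default)
```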
 for j := 0; j < partNum; j++ {
- if atomic.LoadUint32(worker.killed) == 1 {
- resp.Err = errors.Trace(statistics.ErrQueryInterrupted)
+ if err := worker.killer.HandleSignal(); err != nil {
+ resp.Err = err
 worker.respCh <- resp
 return
 }
diff --git a/pkg/statistics/handle/globalstats/topn.go b/pkg/statistics/handle/globalstats/topn.go
index 8251070f42e1d..9e9f14a068a54 100644
--- a/pkg/statistics/handle/globalstats/topn.go
+++ b/pkg/statistics/handle/globalstats/topn.go
@@ -22,6 +22,8 @@ import (
 "github.com/pingcap/errors"
 "github.com/pingcap/tidb/pkg/sessionctx"
 "github.com/pingcap/tidb/pkg/statistics"
+ "github.com/pingcap/tidb/pkg/util/hack"
+ "github.com/pingcap/tidb/pkg/util/sqlkiller"
 "github.com/tiancaiamao/gp"
 )
@@ -29,10 +31,10 @@ func mergeGlobalStatsTopN(gp *gp.Pool, sc sessionctx.Context, wrapper *StatsWrap
 timeZone *time.Location, version int, n uint32, isIndex bool) (*statistics.TopN, []statistics.TopNMeta, []*statistics.Histogram, error) {
 mergeConcurrency := sc.GetSessionVars().AnalyzePartitionMergeConcurrency
- killed := &sc.GetSessionVars().Killed
+ killer := &sc.GetSessionVars().SQLKiller
 // use original method if concurrency equals 1 or for version1
 if mergeConcurrency < 2 {
- return statistics.MergePartTopN2GlobalTopN(timeZone, version, wrapper.AllTopN, n, wrapper.AllHg, isIndex, killed)
+ return MergePartTopN2GlobalTopN(timeZone, version, wrapper.AllTopN, n, wrapper.AllHg, isIndex, killer)
 }
 batchSize := len(wrapper.AllTopN) / mergeConcurrency
 if batchSize < 1 {
@@ -40,15 +42,24 @@ func mergeGlobalStatsTopN(gp *gp.Pool, sc sessionctx.Context, wrapper *StatsWrap
 } else if batchSize > MaxPartitionMergeBatchSize {
 batchSize = MaxPartitionMergeBatchSize
 }
- return MergeGlobalStatsTopNByConcurrency(gp, mergeConcurrency, batchSize, wrapper, timeZone, version, n, isIndex, killed)
+ return MergeGlobalStatsTopNByConcurrency(gp, mergeConcurrency, batchSize, wrapper, timeZone, version, n, isIndex, killer)
 }
-// MergeGlobalStatsTopNByConcurrency merge partition topN by concurrency
-// To merge global stats topn by concurrency, we will separate the partition topn in concurrency part and deal it with different worker.
-// mergeConcurrency is used to control the total concurrency of the running worker, and mergeBatchSize is sued to control
-// the partition size for each worker to solve it
-func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatchSize int, wrapper *StatsWrapper,
- timeZone *time.Location, version int, n uint32, isIndex bool, killed *uint32) (*statistics.TopN,
+// MergeGlobalStatsTopNByConcurrency merges partition TopNs concurrently.
+// To merge the global stats TopN concurrently,
+// we split the partition TopNs into batches and hand each batch to a different worker.
+// mergeConcurrency controls the total number of concurrent workers,
+// and mergeBatchSize controls how many partitions each worker handles.
+func MergeGlobalStatsTopNByConcurrency(
+ gp *gp.Pool,
+ mergeConcurrency, mergeBatchSize int,
+ wrapper *StatsWrapper,
+ timeZone *time.Location,
+ version int,
+ n uint32,
+ isIndex bool,
+ killer *sqlkiller.SQLKiller,
+) (*statistics.TopN,
 []statistics.TopNMeta, []*statistics.Histogram, error) {
 if len(wrapper.AllTopN) < mergeConcurrency {
 mergeConcurrency = len(wrapper.AllTopN)
@@ -68,7 +79,7 @@ func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatch
 taskCh := make(chan *TopnStatsMergeTask, taskNum)
 respCh := make(chan *TopnStatsMergeResponse, taskNum)
 for i := 0; i < mergeConcurrency; i++ {
- worker := NewTopnStatsMergeWorker(taskCh, respCh, wrapper, killed)
+ worker := NewTopnStatsMergeWorker(taskCh, respCh, wrapper, killer)
 wg.Add(1)
 gp.Go(func() {
 defer wg.Done()
@@ -113,3 +124,99 @@ func MergeGlobalStatsTopNByConcurrency(gp *gp.Pool, mergeConcurrency, mergeBatch
 statistics.SortTopnMeta(result)
 return globalTopN, result, wrapper.AllHg, nil
 }
+
+// MergePartTopN2GlobalTopN is used to merge the partition-level topN to global-level topN.
+// The input parameters:
+// 1. `topNs` are the partition-level topNs to be merged.
+// 2. `n` is the size of the global-level topN.
+// Notice: This value can be 0 and has no default value; we must specify it explicitly.
+// 3. `hists` are the partition-level histograms.
+// Some values not in the topN may be placed in the histogram.
+// We need it here to make the values in the global-level TopN more accurate.
+//
+// The output parameters:
+// 1. `*TopN` is the final global-level topN.
+// 2. `[]TopNMeta` are the leftover TopN values from the partition-level TopNs
+// that were not merged into the global-level TopN. We should put them back into the histograms later.
+// 3. `[]*Histogram` are the partition-level histograms, from which
+// some values were removed while merging the global-level topN.
+func MergePartTopN2GlobalTopN(
+ loc *time.Location,
+ version int,
+ topNs []*statistics.TopN,
+ n uint32,
+ hists []*statistics.Histogram,
+ isIndex bool,
+ killer *sqlkiller.SQLKiller,
+) (*statistics.TopN, []statistics.TopNMeta, []*statistics.Histogram, error) {
+ if statistics.CheckEmptyTopNs(topNs) {
+ return nil, nil, hists, nil
+ }
+
+ partNum := len(topNs)
+ // Different TopN structures may hold the same value; we have to merge them.
+ counter := make(map[hack.MutableString]float64)
+ // datumMap is used to store the mapping from the string type to datum type.
+ // The datum is used to find the value in the histogram.
+ datumMap := statistics.NewDatumMapCache()
+ for i, topN := range topNs {
+ if err := killer.HandleSignal(); err != nil {
+ return nil, nil, nil, err
+ }
+ // Ignore the empty topN.
+ if topN.TotalCount() == 0 {
+ continue
+ }
+
+ for _, val := range topN.TopN {
+ encodedVal := hack.String(val.Encoded)
+ _, exists := counter[encodedVal]
+ counter[encodedVal] += float64(val.Count)
+ if exists {
+ // We have already calculated the encodedVal from the histogram, so just continue to the next topN value.
+ continue
+ }
+
+ // We need to check whether the value corresponding to encodedVal is contained in other partition-level stats.
+ // 1. Check the topN first.
+ // 2. If the topN doesn't contain the value corresponding to encodedVal, we should check the histogram.
+ for j := 0; j < partNum; j++ { + if err := killer.HandleSignal(); err != nil { + return nil, nil, nil, err + } + + if (j == i && version >= 2) || topNs[j].FindTopN(val.Encoded) != -1 { + continue + } + // Get the encodedVal from the hists[j] + datum, exists := datumMap.Get(encodedVal) + if !exists { + d, err := datumMap.Put(val, encodedVal, hists[0].Tp.GetType(), isIndex, loc) + if err != nil { + return nil, nil, nil, err + } + datum = d + } + // Get the row count which the value is equal to the encodedVal from histogram. + count, _ := hists[j].EqualRowCount(nil, datum, isIndex) + if count != 0 { + counter[encodedVal] += count + // Remove the value corresponding to encodedVal from the histogram. + hists[j].BinarySearchRemoveVal(statistics.TopNMeta{Encoded: datum.GetBytes(), Count: uint64(count)}) + } + } + } + } + + numTop := len(counter) + if numTop == 0 { + return nil, nil, hists, nil + } + sorted := make([]statistics.TopNMeta, 0, numTop) + for value, cnt := range counter { + data := hack.Slice(string(value)) + sorted = append(sorted, statistics.TopNMeta{Encoded: data, Count: uint64(cnt)}) + } + globalTopN, leftTopN := statistics.GetMergedTopNFromSortedSlice(sorted, n) + return globalTopN, leftTopN, hists, nil +} diff --git a/pkg/statistics/handle/globalstats/topn_bench_test.go b/pkg/statistics/handle/globalstats/topn_bench_test.go index 50c17ef147255..a272bfbd4bfee 100644 --- a/pkg/statistics/handle/globalstats/topn_bench_test.go +++ b/pkg/statistics/handle/globalstats/topn_bench_test.go @@ -12,47 +12,41 @@ // See the License for the specific language governing permissions and // limitations under the License. -package globalstats_test +package globalstats import ( "fmt" + "math/rand" "testing" "time" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/statistics" - "github.com/pingcap/tidb/pkg/statistics/handle/globalstats" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/stretchr/testify/require" "github.com/tiancaiamao/gp" ) -// cmd: go test -run=^$ -bench=BenchmarkMergePartTopN2GlobalTopNWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats -func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { - loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) - version := 1 - isKilled := uint32(0) - +func prepareTopNsAndHists(b *testing.B, partitions int, tz *time.Location) ([]*statistics.TopN, []*statistics.Histogram) { + sc := stmtctx.NewStmtCtxWithTimeZone(tz) // Prepare TopNs. topNs := make([]*statistics.TopN, 0, partitions) for i := 0; i < partitions; i++ { - // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. - topN := statistics.NewTopN(3) + // Construct TopN, should be key1 -> rand(0, 1000), key2 -> rand(0, 1000), key3 -> rand(0, 1000)... + topN := statistics.NewTopN(500) { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) - require.NoError(b, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) - require.NoError(b, err) - topN.AppendTopN(key2, 2) - if i%2 == 0 { - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) + for j := 1; j <= 500; j++ { + // Randomly skip some keys for some partitions. 
+ if i%2 == 0 && j%2 == 0 { + continue + } + key, err := codec.EncodeKey(sc, nil, types.NewIntDatum(int64(j))) require.NoError(b, err) - topN.AppendTopN(key3, 3) + topN.AppendTopN(key, uint64(rand.Intn(1000))) } } topNs = append(topNs, topN) @@ -62,98 +56,85 @@ func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { hists := make([]*statistics.Histogram, 0, partitions) for i := 0; i < partitions; i++ { // Construct Hist - h := statistics.NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) - h.Bounds.AppendInt64(0, 1) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 20}) - h.Bounds.AppendInt64(0, 2) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 3) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 4) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 40}) + h := statistics.NewHistogram(1, 500, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) + for j := 1; j <= 500; j++ { + datum := types.NewIntDatum(int64(j)) + h.AppendBucket(&datum, &datum, int64(10+j*10), 10) + } hists = append(hists, h) } + return topNs, hists +} + +func benchmarkMergePartTopN2GlobalTopNWithHists(partitions int, b *testing.B) { + loc := time.UTC + version := 1 + killer := sqlkiller.SQLKiller{} + topNs, hists := prepareTopNsAndHists(b, partitions, loc) + b.ResetTimer() for i := 0; i < b.N; i++ { - // Benchmark merge 10 topN. - _, _, _, _ = statistics.MergePartTopN2GlobalTopN(loc, version, topNs, 10, hists, false, &isKilled) + // Benchmark merge 100 topN. + _, _, _, _ = MergePartTopN2GlobalTopN( + loc, + version, + topNs, + 100, + hists, + false, + &killer, + ) + } +} + +var benchmarkSizes = []int{100, 1000, 2000, 5000, 10000} + +// cmd: go test -run=^$ -bench=BenchmarkMergePartTopN2GlobalTopNWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats +func BenchmarkMergePartTopN2GlobalTopNWithHists(b *testing.B) { + for _, size := range benchmarkSizes { + b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { + benchmarkMergePartTopN2GlobalTopNWithHists(size, b) + }) } } -// cmd: go test -run=^$ -bench=BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats func benchmarkMergeGlobalStatsTopNByConcurrencyWithHists(partitions int, b *testing.B) { loc := time.UTC - sc := stmtctx.NewStmtCtxWithTimeZone(loc) version := 1 - isKilled := uint32(0) - - // Prepare TopNs. - topNs := make([]*statistics.TopN, 0, partitions) - for i := 0; i < partitions; i++ { - // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. - topN := statistics.NewTopN(3) - { - key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) - require.NoError(b, err) - topN.AppendTopN(key1, 2) - key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) - require.NoError(b, err) - topN.AppendTopN(key2, 2) - if i%2 == 0 { - key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) - require.NoError(b, err) - topN.AppendTopN(key3, 3) - } - } - topNs = append(topNs, topN) - } + killer := sqlkiller.SQLKiller{} - // Prepare Hists. 
- hists := make([]*statistics.Histogram, 0, partitions) - for i := 0; i < partitions; i++ { - // Construct Hist - h := statistics.NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) - h.Bounds.AppendInt64(0, 1) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 20}) - h.Bounds.AppendInt64(0, 2) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 3) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) - h.Bounds.AppendInt64(0, 4) - h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 40}) - hists = append(hists, h) - } - wrapper := globalstats.NewStatsWrapper(hists, topNs) + topNs, hists := prepareTopNsAndHists(b, partitions, loc) + wrapper := NewStatsWrapper(hists, topNs) const mergeConcurrency = 4 batchSize := len(wrapper.AllTopN) / mergeConcurrency if batchSize < 1 { batchSize = 1 - } else if batchSize > globalstats.MaxPartitionMergeBatchSize { - batchSize = globalstats.MaxPartitionMergeBatchSize + } else if batchSize > MaxPartitionMergeBatchSize { + batchSize = MaxPartitionMergeBatchSize } gpool := gp.New(mergeConcurrency, 5*time.Minute) defer gpool.Close() b.ResetTimer() for i := 0; i < b.N; i++ { - // Benchmark merge 10 topN. - _, _, _, _ = globalstats.MergeGlobalStatsTopNByConcurrency(gpool, mergeConcurrency, batchSize, wrapper, loc, version, 10, false, &isKilled) - } -} - -var benchmarkSizes = []int{100, 1000, 10000, 100000, 1000000, 10000000} -var benchmarkConcurrencySizes = []int{100, 1000, 10000, 100000} - -func BenchmarkMergePartTopN2GlobalTopNWithHists(b *testing.B) { - for _, size := range benchmarkSizes { - b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { - benchmarkMergePartTopN2GlobalTopNWithHists(size, b) - }) + // Benchmark merge 100 topN. + _, _, _, _ = MergeGlobalStatsTopNByConcurrency( + gpool, + mergeConcurrency, + batchSize, + wrapper, + loc, + version, + 100, + false, + &killer, + ) } } +// cmd: go test -run=^$ -bench=BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists -benchmem github.com/pingcap/tidb/pkg/statistics/handle/globalstats func BenchmarkMergeGlobalStatsTopNByConcurrencyWithHists(b *testing.B) { - for _, size := range benchmarkConcurrencySizes { + for _, size := range benchmarkSizes { b.Run(fmt.Sprintf("Size%d", size), func(b *testing.B) { benchmarkMergeGlobalStatsTopNByConcurrencyWithHists(size, b) }) diff --git a/pkg/statistics/handle/globalstats/topn_test.go b/pkg/statistics/handle/globalstats/topn_test.go new file mode 100644 index 0000000000000..9be370fc52032 --- /dev/null +++ b/pkg/statistics/handle/globalstats/topn_test.go @@ -0,0 +1,113 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
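Both the reworked benchmarks above and the new unit tests below construct a `sqlkiller.SQLKiller{}` and hand a pointer to it down the merge path, replacing the old `killed *uint32` atomic flag. A minimal self-contained sketch of that cooperative-cancellation pattern is shown here; it is only an illustration of the shape, not the actual `util/sqlkiller` API, whose internals are assumed.

```go
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// ErrInterrupted mimics the error a killed query would surface.
var ErrInterrupted = errors.New("query interrupted")

// killer is a simplified stand-in for util/sqlkiller.SQLKiller:
// a shared flag that long-running workers poll cooperatively.
type killer struct {
	signal atomic.Uint32
}

// Kill requests cancellation.
func (k *killer) Kill() { k.signal.Store(1) }

// HandleSignal returns an error once a kill was requested, so callers
// abort through the normal error path instead of checking a bare flag.
func (k *killer) HandleSignal() error {
	if k.signal.Load() != 0 {
		return ErrInterrupted
	}
	return nil
}

func mergeLoop(k *killer, parts []int) (sum int, err error) {
	for _, p := range parts {
		// Check for cancellation at every unit of work, as the merge
		// workers do before touching each partition's TopN.
		if err := k.HandleSignal(); err != nil {
			return 0, err
		}
		sum += p
	}
	return sum, nil
}

func main() {
	var k killer
	k.Kill()
	_, err := mergeLoop(&k, []int{1, 2, 3})
	fmt.Println(err) // query interrupted
}
```

Returning an error rather than a boolean is the point of the migration: the kill reason travels with the abort, so call sites no longer need to manufacture `ErrQueryInterrupted` themselves.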
+ +package globalstats + +import ( + "testing" + "time" + + "github.com/pingcap/tidb/pkg/parser/mysql" + "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" + "github.com/pingcap/tidb/pkg/statistics" + "github.com/pingcap/tidb/pkg/types" + "github.com/pingcap/tidb/pkg/util/chunk" + "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/sqlkiller" + "github.com/stretchr/testify/require" +) + +func TestMergePartTopN2GlobalTopNWithoutHists(t *testing.T) { + loc := time.UTC + sc := stmtctx.NewStmtCtxWithTimeZone(loc) + version := 1 + killer := sqlkiller.SQLKiller{} + + // Prepare TopNs. + topNs := make([]*statistics.TopN, 0, 10) + for i := 0; i < 10; i++ { + // Construct TopN, should be key(1, 1) -> 2, key(1, 2) -> 2, key(1, 3) -> 3. + topN := statistics.NewTopN(3) + { + key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(1)) + require.NoError(t, err) + topN.AppendTopN(key1, 2) + key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(2)) + require.NoError(t, err) + topN.AppendTopN(key2, 2) + key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1), types.NewIntDatum(3)) + require.NoError(t, err) + topN.AppendTopN(key3, 3) + } + topNs = append(topNs, topN) + } + + // Test merge 2 topN with nil hists. + globalTopN, leftTopN, _, err := MergePartTopN2GlobalTopN(loc, version, topNs, 2, nil, false, &killer) + require.NoError(t, err) + require.Len(t, globalTopN.TopN, 2, "should only have 2 topN") + require.Equal(t, uint64(50), globalTopN.TotalCount(), "should have 50 rows") + require.Len(t, leftTopN, 1, "should have 1 left topN") +} + +func TestMergePartTopN2GlobalTopNWithHists(t *testing.T) { + loc := time.UTC + sc := stmtctx.NewStmtCtxWithTimeZone(loc) + version := 1 + killer := sqlkiller.SQLKiller{} + + // Prepare TopNs. + topNs := make([]*statistics.TopN, 0, 10) + for i := 0; i < 10; i++ { + // Construct TopN, should be key1 -> 2, key2 -> 2, key3 -> 3. + topN := statistics.NewTopN(3) + { + key1, err := codec.EncodeKey(sc, nil, types.NewIntDatum(1)) + require.NoError(t, err) + topN.AppendTopN(key1, 2) + key2, err := codec.EncodeKey(sc, nil, types.NewIntDatum(2)) + require.NoError(t, err) + topN.AppendTopN(key2, 2) + if i%2 == 0 { + key3, err := codec.EncodeKey(sc, nil, types.NewIntDatum(3)) + require.NoError(t, err) + topN.AppendTopN(key3, 3) + } + } + topNs = append(topNs, topN) + } + + // Prepare Hists. + hists := make([]*statistics.Histogram, 0, 10) + for i := 0; i < 10; i++ { + // Construct Hist + h := statistics.NewHistogram(1, 10, 0, 0, types.NewFieldType(mysql.TypeTiny), chunk.InitialCapacity, 0) + h.Bounds.AppendInt64(0, 1) + h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 20}) + h.Bounds.AppendInt64(0, 2) + h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) + h.Bounds.AppendInt64(0, 3) + h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 30}) + h.Bounds.AppendInt64(0, 4) + h.Buckets = append(h.Buckets, statistics.Bucket{Repeat: 10, Count: 40}) + hists = append(hists, h) + } + + // Test merge 2 topN. 
+ globalTopN, leftTopN, _, err := MergePartTopN2GlobalTopN(loc, version, topNs, 2, hists, false, &killer) + require.NoError(t, err) + require.Len(t, globalTopN.TopN, 2, "should only have 2 topN") + require.Equal(t, uint64(55), globalTopN.TotalCount(), "should have 55") + require.Len(t, leftTopN, 1, "should have 1 left topN") +} diff --git a/pkg/statistics/handle/history/history_stats.go b/pkg/statistics/handle/history/history_stats.go index 943186843a9a1..690a265e05eff 100644 --- a/pkg/statistics/handle/history/history_stats.go +++ b/pkg/statistics/handle/history/history_stats.go @@ -77,7 +77,7 @@ func (sh *statsHistoryImpl) RecordHistoricalStatsMeta(tableID int64, version uin } err := util.CallWithSCtx(sh.statsHandle.SPool(), func(sctx sessionctx.Context) error { return RecordHistoricalStatsMeta(sctx, tableID, version, source) - }) + }, util.FlagWrapTxn) if err != nil { // just log the error, hide the error from the outside caller. logutil.BgLogger().Error("record historical stats meta failed", zap.Int64("table-id", tableID), @@ -113,14 +113,6 @@ func RecordHistoricalStatsMeta(sctx sessionctx.Context, tableID int64, version u } modifyCount, count := rows[0].GetInt64(0), rows[0].GetInt64(1) - _, err = util.Exec(sctx, "begin pessimistic") - if err != nil { - return errors.Trace(err) - } - defer func() { - err = util.FinishTransaction(sctx, err) - }() - const sql = "REPLACE INTO mysql.stats_meta_history(table_id, modify_count, count, version, source, create_time) VALUES (%?, %?, %?, %?, %?, NOW())" if _, err := util.Exec(sctx, sql, tableID, modifyCount, count, version, source); err != nil { return errors.Trace(err) diff --git a/pkg/statistics/handle/storage/BUILD.bazel b/pkg/statistics/handle/storage/BUILD.bazel index 6f2b8c7742c7c..789b6dd778f9c 100644 --- a/pkg/statistics/handle/storage/BUILD.bazel +++ b/pkg/statistics/handle/storage/BUILD.bazel @@ -33,6 +33,7 @@ go_library( "//pkg/util/intest", "//pkg/util/logutil", "//pkg/util/memory", + "//pkg/util/sqlescape", "//pkg/util/sqlexec", "@com_github_klauspost_compress//gzip", "@com_github_pingcap_errors//:errors", diff --git a/pkg/statistics/handle/storage/gc.go b/pkg/statistics/handle/storage/gc.go index de6a788de687e..28320718a3463 100644 --- a/pkg/statistics/handle/storage/gc.go +++ b/pkg/statistics/handle/storage/gc.go @@ -388,16 +388,10 @@ func MarkExtendedStatsDeleted(sctx sessionctx.Context, logutil.BgLogger().Warn("unexpected duplicate extended stats records found", zap.String("name", statsName), zap.Int64("table_id", tableID)) } - _, err = util.Exec(sctx, "begin pessimistic") - if err != nil { - return 0, errors.Trace(err) - } defer func() { - err1 := util.FinishTransaction(sctx, err) - if err == nil && err1 == nil { + if err == nil { removeExtendedStatsItem(statsCache, tableID, statsName) } - err = err1 }() version, err := util.GetStartTS(sctx) if err != nil { diff --git a/pkg/statistics/handle/storage/json.go b/pkg/statistics/handle/storage/json.go index a174d38aaa566..25334184b3f50 100644 --- a/pkg/statistics/handle/storage/json.go +++ b/pkg/statistics/handle/storage/json.go @@ -18,7 +18,6 @@ import ( "bytes" "encoding/json" "io" - "sync/atomic" "time" "github.com/klauspost/compress/gzip" @@ -111,8 +110,8 @@ func GenJSONTableFromStats(sctx sessionctx.Context, dbName string, tableInfo *mo } proto := dumpJSONCol(hist, col.CMSketch, col.TopN, col.FMSketch, &col.StatsVer) tracker.Consume(proto.TotalMemoryUsage()) - if atomic.LoadUint32(&sctx.GetSessionVars().Killed) == 1 { - return nil, errors.Trace(statistics.ErrQueryInterrupted) + 
if err := sctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return nil, err } jsonTbl.Columns[col.Info.Name.L] = proto col.FMSketch.DestroyAndPutToPool() @@ -120,8 +119,8 @@ func GenJSONTableFromStats(sctx sessionctx.Context, dbName string, tableInfo *mo for _, idx := range tbl.Indices { proto := dumpJSONCol(&idx.Histogram, idx.CMSketch, idx.TopN, nil, &idx.StatsVer) tracker.Consume(proto.TotalMemoryUsage()) - if atomic.LoadUint32(&sctx.GetSessionVars().Killed) == 1 { - return nil, errors.Trace(statistics.ErrQueryInterrupted) + if err := sctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return nil, err } jsonTbl.Indices[idx.Info.Name.L] = proto } @@ -285,13 +284,6 @@ func BlocksToJSONTable(blocks [][]byte) (*util.JSONTable, error) { // TableHistoricalStatsToJSON converts the historical stats of a table to JSONTable. func TableHistoricalStatsToJSON(sctx sessionctx.Context, physicalID int64, snapshot uint64) (jt *util.JSONTable, exist bool, err error) { - if _, err := util.Exec(sctx, "begin"); err != nil { - return nil, false, err - } - defer func() { - err = util.FinishTransaction(sctx, err) - }() - // get meta version rows, _, err := util.ExecRows(sctx, "select distinct version from mysql.stats_meta_history where table_id = %? and version <= %? order by version desc limit 1", physicalID, snapshot) if err != nil { diff --git a/pkg/statistics/handle/storage/read.go b/pkg/statistics/handle/storage/read.go index f59dd8e34b82c..e8c0894148b2c 100644 --- a/pkg/statistics/handle/storage/read.go +++ b/pkg/statistics/handle/storage/read.go @@ -18,7 +18,6 @@ import ( "encoding/json" "fmt" "strconv" - "sync/atomic" "time" "github.com/pingcap/errors" @@ -74,8 +73,7 @@ func HistogramFromStorage(sctx sessionctx.Context, tableID int64, colID int64, t // Invalid date values may be inserted into table under some relaxed sql mode. Those values may exist in statistics. // Hence, when reading statistics, we should skip invalid date check. See #39336. sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = true - sc.IgnoreZeroInDate = true + sc.SetTypeFlags(sc.TypeFlags().WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)) d := rows[i].GetDatum(2, &fields[2].Column.FieldType) // For new collation data, when storing the bounds of the histogram, we store the collate key instead of the // original value. 
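The read path above also replaces the `AllowInvalidDate`/`IgnoreZeroInDate` boolean fields with one flags value built through chained `With*` calls. A rough sketch of that immutable bitmask-builder pattern follows; the type and flag names are simplified stand-ins, not TiDB's actual type-context implementation.

```go
package main

import "fmt"

// Flags is a simplified stand-in for statement type flags: an
// immutable bitmask extended through With* methods, so a single
// expression can enable several behaviors at once.
type Flags uint64

const (
	FlagIgnoreInvalidDateErr Flags = 1 << iota
	FlagIgnoreZeroInDate
)

// WithIgnoreInvalidDateErr returns a copy with the bit set or cleared.
func (f Flags) WithIgnoreInvalidDateErr(on bool) Flags {
	if on {
		return f | FlagIgnoreInvalidDateErr
	}
	return f &^ FlagIgnoreInvalidDateErr
}

// WithIgnoreZeroInDate returns a copy with the bit set or cleared.
func (f Flags) WithIgnoreZeroInDate(on bool) Flags {
	if on {
		return f | FlagIgnoreZeroInDate
	}
	return f &^ FlagIgnoreZeroInDate
}

func main() {
	var f Flags
	// Mirrors the shape of
	// sc.SetTypeFlags(sc.TypeFlags().WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)).
	f = f.WithIgnoreInvalidDateErr(true).WithIgnoreZeroInDate(true)
	fmt.Printf("%b\n", f) // 11
}
```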
@@ -87,12 +85,12 @@ func HistogramFromStorage(sctx sessionctx.Context, tableID int64, colID int64, t if tp.EvalType() == types.ETString && tp.GetType() != mysql.TypeEnum && tp.GetType() != mysql.TypeSet { tp = types.NewFieldType(mysql.TypeBlob) } - lowerBound, err = d.ConvertTo(sc, tp) + lowerBound, err = d.ConvertTo(sc.TypeCtx(), tp) if err != nil { return nil, errors.Trace(err) } d = rows[i].GetDatum(3, &fields[3].Column.FieldType) - upperBound, err = d.ConvertTo(sc, tp) + upperBound, err = d.ConvertTo(sc.TypeCtx(), tp) if err != nil { return nil, errors.Trace(err) } @@ -450,8 +448,8 @@ func TableStatsFromStorage(sctx sessionctx.Context, snapshot uint64, tableInfo * return nil, nil } for _, row := range rows { - if atomic.LoadUint32(&sctx.GetSessionVars().Killed) == 1 { - return nil, errors.Trace(statistics.ErrQueryInterrupted) + if err := sctx.GetSessionVars().SQLKiller.HandleSignal(); err != nil { + return nil, err } if row.GetInt64(1) > 0 { err = indexStatsFromStorage(sctx, row, table, tableInfo, loadAll, lease, tracker) diff --git a/pkg/statistics/handle/storage/save.go b/pkg/statistics/handle/storage/save.go index 8ff6660728028..179968af0beed 100644 --- a/pkg/statistics/handle/storage/save.go +++ b/pkg/statistics/handle/storage/save.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlescape" "github.com/pingcap/tidb/pkg/util/sqlexec" "go.uber.org/zap" ) @@ -52,7 +53,7 @@ func saveTopNToStorage(sctx sessionctx.Context, tableID int64, isIndex int, hist sql.WriteString("insert into mysql.stats_top_n (table_id, is_index, hist_id, value, count) values ") for j := i; j < end; j++ { topn := topN.TopN[j] - val := sqlexec.MustEscapeSQL("(%?, %?, %?, %?, %?)", tableID, isIndex, histID, topn.Encoded, topn.Count) + val := sqlescape.MustEscapeSQL("(%?, %?, %?, %?, %?)", tableID, isIndex, histID, topn.Encoded, topn.Count) if j > i { val = "," + val } @@ -89,7 +90,7 @@ func saveBucketsToStorage(sctx sessionctx.Context, tableID int64, isIndex int, h count -= hg.Buckets[j-1].Count } var upperBound types.Datum - upperBound, err = hg.GetUpper(j).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob)) + upperBound, err = hg.GetUpper(j).ConvertTo(sc.TypeCtx(), types.NewFieldType(mysql.TypeBlob)) if err != nil { return } @@ -97,11 +98,11 @@ func saveBucketsToStorage(sctx sessionctx.Context, tableID int64, isIndex int, h lastAnalyzePos = upperBound.GetBytes() } var lowerBound types.Datum - lowerBound, err = hg.GetLower(j).ConvertTo(sc, types.NewFieldType(mysql.TypeBlob)) + lowerBound, err = hg.GetLower(j).ConvertTo(sc.TypeCtx(), types.NewFieldType(mysql.TypeBlob)) if err != nil { return } - val := sqlexec.MustEscapeSQL("(%?, %?, %?, %?, %?, %?, %?, %?, %?)", tableID, isIndex, hg.ID, j, count, bucket.Repeat, lowerBound.GetBytes(), upperBound.GetBytes(), bucket.NDV) + val := sqlescape.MustEscapeSQL("(%?, %?, %?, %?, %?, %?, %?, %?, %?)", tableID, isIndex, hg.ID, j, count, bucket.Repeat, lowerBound.GetBytes(), upperBound.GetBytes(), bucket.NDV) if j > i { val = "," + val } @@ -125,13 +126,6 @@ func SaveTableStatsToStorage(sctx sessionctx.Context, needDumpFMS := results.TableID.IsPartitionTable() tableID := results.TableID.GetStatisticsID() ctx := util.StatsCtx - _, err = util.Exec(sctx, "begin pessimistic") - if err != nil { - return 0, err - } - defer func() { - err = util.FinishTransaction(sctx, err) - }() txn, err := sctx.Txn(true) if err != nil { return 0, err @@ -334,13 
+328,6 @@ func SaveTableStatsToStorage(sctx sessionctx.Context,
 func SaveStatsToStorage(sctx sessionctx.Context,
 tableID int64, count, modifyCount int64, isIndex int, hg *statistics.Histogram,
 cms *statistics.CMSketch, topN *statistics.TopN, statsVersion int, isAnalyzed int64, updateAnalyzeTime bool) (statsVer uint64, err error) {
- _, err = util.Exec(sctx, "begin pessimistic")
- if err != nil {
- return 0, errors.Trace(err)
- }
- defer func() {
- err = util.FinishTransaction(sctx, err)
- }()
 version, err := util.GetStartTS(sctx)
 if err != nil {
 return 0, errors.Trace(err)
@@ -404,13 +391,6 @@ func SaveStatsToStorage(sctx sessionctx.Context,
 func SaveMetaToStorage(
 sctx sessionctx.Context,
 tableID, count, modifyCount int64) (statsVer uint64, err error) {
- _, err = util.Exec(sctx, "begin")
- if err != nil {
- return 0, errors.Trace(err)
- }
- defer func() {
- err = util.FinishTransaction(sctx, err)
- }()
 version, err := util.GetStartTS(sctx)
 if err != nil {
 return 0, errors.Trace(err)
diff --git a/pkg/statistics/handle/storage/stats_read_writer.go b/pkg/statistics/handle/storage/stats_read_writer.go
index b35879bce7c4e..b187b7b1e557b 100644
--- a/pkg/statistics/handle/storage/stats_read_writer.go
+++ b/pkg/statistics/handle/storage/stats_read_writer.go
@@ -86,7 +86,7 @@ func (s *statsReadWriter) InsertColStats2KV(physicalID int64, colInfos []*model.
 count := req.GetRow(0).GetInt64(0)
 for _, colInfo := range colInfos {
 value := types.NewDatum(colInfo.GetOriginDefaultValue())
- value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx, &colInfo.FieldType)
+ value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), &colInfo.FieldType)
 if err != nil {
 return err
 }
@@ -100,7 +100,7 @@ func (s *statsReadWriter) InsertColStats2KV(physicalID int64, colInfos []*model.
 if _, err := util.Exec(sctx, "insert into mysql.stats_histograms (version, table_id, is_index, hist_id, distinct_count, tot_col_size) values (%?, %?, 0, %?, 1, %?)", startTS, physicalID, colInfo.ID, int64(len(value.GetBytes()))*count); err != nil {
 return err
 }
- value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeBlob))
+ value, err = value.ConvertTo(sctx.GetSessionVars().StmtCtx.TypeCtx(), types.NewFieldType(mysql.TypeBlob))
 if err != nil {
 return err
 }
@@ -161,7 +161,8 @@ func (s *statsReadWriter) ChangeGlobalStatsID(from, to int64) (err error) {
 }, util.FlagWrapTxn)
 }
-// ResetTableStats2KVForDrop resets the count to 0.
+// ResetTableStats2KVForDrop updates the version of mysql.stats_meta.
+// The GC worker will then delete the old versions of the stats.
 func (s *statsReadWriter) ResetTableStats2KVForDrop(physicalID int64) (err error) {
 statsVer := uint64(0)
 defer func() {
@@ -196,7 +197,7 @@ func (s *statsReadWriter) SaveTableStatsToStorage(results *statistics.AnalyzeRes
 err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
 statsVer, err = SaveTableStatsToStorage(sctx, results, analyzeSnapshot)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 tableID := results.TableID.GetStatisticsID()
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, true)
@@ -238,7 +239,7 @@ func (s *statsReadWriter) SaveStatsToStorage(tableID int64, count, modifyCount i
 statsVer, err = SaveStatsToStorage(sctx, tableID, count, modifyCount, isIndex, hg,
 cms, topN, statsVersion, isAnalyzed, updateAnalyzeTime)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, false)
 }
@@ -251,7 +252,7 @@ func (s *statsReadWriter) saveMetaToStorage(tableID, count, modifyCount int64, s
 err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
 statsVer, err = SaveMetaToStorage(sctx, tableID, count, modifyCount)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, source, false)
 }
@@ -264,7 +265,7 @@ func (s *statsReadWriter) InsertExtendedStats(statsName string, colIDs []int64,
 err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
 statsVer, err = InsertExtendedStats(sctx, s.statsHandler, statsName, colIDs, tp, tableID, ifNotExists)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
 }
@@ -277,7 +278,7 @@ func (s *statsReadWriter) MarkExtendedStatsDeleted(statsName string, tableID int
 err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
 statsVer, err = MarkExtendedStatsDeleted(sctx, s.statsHandler, statsName, tableID, ifExists)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
 }
@@ -290,13 +291,28 @@ func (s *statsReadWriter) SaveExtendedStatsToStorage(tableID int64, extStats *st
 err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error {
 statsVer, err = SaveExtendedStatsToStorage(sctx, tableID, extStats, isLoad)
 return err
- })
+ }, util.FlagWrapTxn)
 if err == nil && statsVer != 0 {
 s.statsHandler.RecordHistoricalStatsMeta(tableID, statsVer, "extended stats", false)
 }
 return
 }
+
+// LoadTablePartitionStats loads partition stats info from storage.
+func (s *statsReadWriter) LoadTablePartitionStats(tableInfo *model.TableInfo, partitionDef *model.PartitionDefinition) (*statistics.Table, error) {
+ var partitionStats *statistics.Table
+ partitionStats, err := s.TableStatsFromStorage(tableInfo, partitionDef.ID, true, 0)
+ if err != nil {
+ return nil, err
+ }
+ // If err == nil && partitionStats == nil, it means the partition-level stats whose physicalID equals the partition ID are missing.
+ if partitionStats == nil {
+ errMsg := fmt.Sprintf("table `%s` partition `%s`", tableInfo.Name.L, partitionDef.Name.L)
+ err = types.ErrPartitionStatsMissing.GenWithStackByArgs(errMsg)
+ return nil, err
+ }
+ return partitionStats, nil
+}
+
 // LoadNeededHistograms will load histograms for those needed columns/indices.
func (s *statsReadWriter) LoadNeededHistograms() (err error) { err = util.CallWithSCtx(s.statsHandler.SPool(), func(sctx sessionctx.Context) error { diff --git a/pkg/statistics/handle/storage/update.go b/pkg/statistics/handle/storage/update.go index 50ca730eeeb2c..bc304ff596e90 100644 --- a/pkg/statistics/handle/storage/update.go +++ b/pkg/statistics/handle/storage/update.go @@ -111,13 +111,6 @@ func InsertExtendedStats(sctx sessionctx.Context, } strColIDs := string(bytes) - _, err = statsutil.Exec(sctx, "begin pessimistic") - if err != nil { - return 0, errors.Trace(err) - } - defer func() { - err = statsutil.FinishTransaction(sctx, err) - }() // No need to use `exec.ExecuteInternal` since we have acquired the lock. rows, _, err := statsutil.ExecRows(sctx, "SELECT name, type, column_ids FROM mysql.stats_extended WHERE table_id = %? and status in (%?, %?)", tableID, statistics.ExtendedStatsInited, statistics.ExtendedStatsAnalyzed) if err != nil { @@ -170,13 +163,6 @@ func SaveExtendedStatsToStorage(sctx sessionctx.Context, return 0, nil } - _, err = statsutil.Exec(sctx, "begin pessimistic") - if err != nil { - return 0, errors.Trace(err) - } - defer func() { - err = statsutil.FinishTransaction(sctx, err) - }() version, err := statsutil.GetStartTS(sctx) if err != nil { return 0, errors.Trace(err) diff --git a/pkg/statistics/handle/usage/BUILD.bazel b/pkg/statistics/handle/usage/BUILD.bazel index 94c182b87e03d..10b9760d723d4 100644 --- a/pkg/statistics/handle/usage/BUILD.bazel +++ b/pkg/statistics/handle/usage/BUILD.bazel @@ -21,7 +21,7 @@ go_library( "//pkg/types", "//pkg/util", "//pkg/util/logutil", - "//pkg/util/sqlexec", + "//pkg/util/sqlescape", "@com_github_pingcap_errors//:errors", "@org_uber_go_zap//:zap", ], diff --git a/pkg/statistics/handle/usage/index_usage.go b/pkg/statistics/handle/usage/index_usage.go index a84a19d09016e..e3dba8e5bf778 100644 --- a/pkg/statistics/handle/usage/index_usage.go +++ b/pkg/statistics/handle/usage/index_usage.go @@ -23,7 +23,7 @@ import ( "github.com/pingcap/tidb/pkg/sessionctx" "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlescape" ) // NewSessionIndexUsageCollector creates a new IndexUsageCollector on the list. 
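The next two hunks only swap `sqlexec.MustFormatSQL` for `sqlescape.MustFormatSQL` (the formatter moved packages); the batched-INSERT shape around it is unchanged. A hedged sketch of that shape follows, with a hypothetical `formatRow` standing in for real `%?` placeholder escaping, which this sketch does not implement.

```go
package main

import (
	"fmt"
	"strings"
)

// formatRow is a hypothetical stand-in for the %? placeholder
// formatting: real code must escape values; here we only format ints.
func formatRow(tableID, indexID, queryCount int64) string {
	return fmt.Sprintf("(%d, %d, %d)", tableID, indexID, queryCount)
}

// buildBatchInsert shows the batching shape used when dumping index
// usage: one INSERT per batch, rows joined by commas, plus an
// ON DUPLICATE KEY UPDATE clause so counters accumulate across dumps.
func buildBatchInsert(rows [][3]int64) string {
	sql := new(strings.Builder)
	sql.WriteString("INSERT INTO mysql.SCHEMA_INDEX_USAGE (table_id,index_id,query_count) VALUES ")
	for i, r := range rows {
		if i > 0 {
			sql.WriteString(",")
		}
		sql.WriteString(formatRow(r[0], r[1], r[2]))
	}
	sql.WriteString(" ON DUPLICATE KEY UPDATE query_count=query_count+VALUES(query_count)")
	return sql.String()
}

func main() {
	fmt.Println(buildBatchInsert([][3]int64{{1, 2, 10}, {1, 3, 5}}))
}
```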
@@ -182,16 +182,16 @@ func dumpIndexUsageToKV(sctx sessionctx.Context, listHead *SessionIndexUsageColl end = len(mapper) } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "insert into mysql.SCHEMA_INDEX_USAGE (table_id,index_id,query_count,rows_selected,last_used_at) values") + sqlescape.MustFormatSQL(sql, "insert into mysql.SCHEMA_INDEX_USAGE (table_id,index_id,query_count,rows_selected,last_used_at) values") for j := i; j < end; j++ { index := indexInformationSlice[j] - sqlexec.MustFormatSQL(sql, "(%?, %?, %?, %?, %?)", index.id.TableID, index.id.IndexID, + sqlescape.MustFormatSQL(sql, "(%?, %?, %?, %?, %?)", index.id.TableID, index.id.IndexID, index.information.QueryCount, index.information.RowsSelected, index.information.LastUsedAt) if j < end-1 { - sqlexec.MustFormatSQL(sql, ",") + sqlescape.MustFormatSQL(sql, ",") } } - sqlexec.MustFormatSQL(sql, "on duplicate key update query_count=query_count+values(query_count),rows_selected=rows_selected+values(rows_selected),last_used_at=greatest(last_used_at, values(last_used_at))") + sqlescape.MustFormatSQL(sql, "on duplicate key update query_count=query_count+values(query_count),rows_selected=rows_selected+values(rows_selected),last_used_at=greatest(last_used_at, values(last_used_at))") if _, _, err := util.ExecRows(sctx, sql.String()); err != nil { return errors.Trace(err) } diff --git a/pkg/statistics/handle/usage/session_stats_collect.go b/pkg/statistics/handle/usage/session_stats_collect.go index 6ae070f09e996..2821020143af4 100644 --- a/pkg/statistics/handle/usage/session_stats_collect.go +++ b/pkg/statistics/handle/usage/session_stats_collect.go @@ -30,7 +30,7 @@ import ( utilstats "github.com/pingcap/tidb/pkg/statistics/handle/util" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util" - "github.com/pingcap/tidb/pkg/util/sqlexec" + "github.com/pingcap/tidb/pkg/util/sqlescape" ) var ( @@ -234,16 +234,16 @@ func (s *statsUsageImpl) DumpColStatsUsageToKV() error { end = len(pairs) } sql := new(strings.Builder) - sqlexec.MustFormatSQL(sql, "INSERT INTO mysql.column_stats_usage (table_id, column_id, last_used_at) VALUES ") + sqlescape.MustFormatSQL(sql, "INSERT INTO mysql.column_stats_usage (table_id, column_id, last_used_at) VALUES ") for j := i; j < end; j++ { // Since we will use some session from session pool to execute the insert statement, we pass in UTC time here and covert it // to the session's time zone when executing the insert statement. In this way we can make the stored time right. 
- sqlexec.MustFormatSQL(sql, "(%?, %?, CONVERT_TZ(%?, '+00:00', @@TIME_ZONE))", pairs[j].tblColID.TableID, pairs[j].tblColID.ID, pairs[j].lastUsedAt)
+ sqlescape.MustFormatSQL(sql, "(%?, %?, CONVERT_TZ(%?, '+00:00', @@TIME_ZONE))", pairs[j].tblColID.TableID, pairs[j].tblColID.ID, pairs[j].lastUsedAt)
 if j < end-1 {
- sqlexec.MustFormatSQL(sql, ",")
+ sqlescape.MustFormatSQL(sql, ",")
 }
 }
- sqlexec.MustFormatSQL(sql, " ON DUPLICATE KEY UPDATE last_used_at = CASE WHEN last_used_at IS NULL THEN VALUES(last_used_at) ELSE GREATEST(last_used_at, VALUES(last_used_at)) END")
+ sqlescape.MustFormatSQL(sql, " ON DUPLICATE KEY UPDATE last_used_at = CASE WHEN last_used_at IS NULL THEN VALUES(last_used_at) ELSE GREATEST(last_used_at, VALUES(last_used_at)) END")
 if err := utilstats.CallWithSCtx(s.statsHandle.SPool(), func(sctx sessionctx.Context) error {
 _, _, err := utilstats.ExecRows(sctx, sql.String())
 return err
diff --git a/pkg/statistics/handle/util/interfaces.go b/pkg/statistics/handle/util/interfaces.go
index 6d2d1d9db81c0..e3cd3a2ef8434 100644
--- a/pkg/statistics/handle/util/interfaces.go
+++ b/pkg/statistics/handle/util/interfaces.go
@@ -222,6 +222,9 @@ type StatsReadWriter interface {
 // TableStatsFromStorage loads table stats info from storage.
 TableStatsFromStorage(tableInfo *model.TableInfo, physicalID int64, loadAll bool, snapshot uint64) (statsTbl *statistics.Table, err error)
+ // LoadTablePartitionStats loads partition stats info from storage.
+ LoadTablePartitionStats(tableInfo *model.TableInfo, partitionDef *model.PartitionDefinition) (*statistics.Table, error)
+
 // StatsMetaCountAndModifyCount reads count and modify_count for the given table from mysql.stats_meta.
 StatsMetaCountAndModifyCount(tableID int64) (count, modifyCount int64, err error)
@@ -249,7 +252,8 @@ type StatsReadWriter interface {
 // then tidb-server will reload automatic.
 UpdateStatsVersion() error
- // ResetTableStats2KVForDrop resets the count to 0.
+ // ResetTableStats2KVForDrop updates the version of mysql.stats_meta.
+ // The GC worker will then delete the old versions of the stats.
 ResetTableStats2KVForDrop(physicalID int64) (err error)
 // ChangeGlobalStatsID changes the global stats ID.
diff --git a/pkg/statistics/handle/util/util.go b/pkg/statistics/handle/util/util.go
index 5ed0da604c73c..50add779777d0 100644
--- a/pkg/statistics/handle/util/util.go
+++ b/pkg/statistics/handle/util/util.go
@@ -66,10 +66,10 @@ type SessionPool interface {
 Put(pools.Resource)
 }
-// FinishTransaction will execute `commit` when error is nil, otherwise `rollback`.
-func FinishTransaction(sctx sessionctx.Context, err error) error {
+// finishTransaction will execute `COMMIT` when the error is nil; otherwise it executes `rollback`.
+func finishTransaction(sctx sessionctx.Context, err error) error {
 if err == nil {
- _, _, err = ExecRows(sctx, "commit")
+ _, _, err = ExecRows(sctx, "COMMIT")
 } else {
 _, _, err1 := ExecRows(sctx, "rollback")
 terror.Log(errors.Trace(err1))
@@ -174,11 +174,11 @@ func UpdateSCtxVarsForStats(sctx sessionctx.Context) error {
 // WrapTxn uses a transaction here can let different SQLs in this operation have the same data visibility.
func WrapTxn(sctx sessionctx.Context, f func(sctx sessionctx.Context) error) (err error) { // TODO: check whether this sctx is already in a txn - if _, _, err := ExecRows(sctx, "begin"); err != nil { + if _, _, err := ExecRows(sctx, "BEGIN PESSIMISTIC"); err != nil { return err } defer func() { - err = FinishTransaction(sctx, err) + err = finishTransaction(sctx, err) }() err = f(sctx) return diff --git a/pkg/statistics/histogram.go b/pkg/statistics/histogram.go index f254620856ece..a94f1512720ab 100644 --- a/pkg/statistics/histogram.go +++ b/pkg/statistics/histogram.go @@ -202,7 +202,7 @@ func (hg *Histogram) ConvertTo(sc *stmtctx.StatementContext, tp *types.FieldType iter := chunk.NewIterator4Chunk(hg.Bounds) for row := iter.Begin(); row != iter.End(); row = iter.Next() { d := row.GetDatum(0, hg.Tp) - d, err := d.ConvertTo(sc, tp) + d, err := d.ConvertTo(sc.TypeCtx(), tp) if err != nil { return nil, errors.Trace(err) } @@ -296,16 +296,18 @@ func (hg *Histogram) BinarySearchRemoveVal(valCntPairs TopNMeta) { return } } + var midIdx = 0 + var found bool for lowIdx <= highIdx { - midIdx := (lowIdx + highIdx) / 2 + midIdx = (lowIdx + highIdx) / 2 cmpResult := bytes.Compare(hg.Bounds.Column(0).GetRaw(midIdx*2), valCntPairs.Encoded) if cmpResult > 0 { - lowIdx = midIdx + 1 + highIdx = midIdx - 1 continue } cmpResult = bytes.Compare(hg.Bounds.Column(0).GetRaw(midIdx*2+1), valCntPairs.Encoded) if cmpResult < 0 { - highIdx = midIdx - 1 + lowIdx = midIdx + 1 continue } if hg.Buckets[midIdx].NDV > 0 { @@ -318,8 +320,17 @@ func (hg *Histogram) BinarySearchRemoveVal(valCntPairs TopNMeta) { if hg.Buckets[midIdx].Count < 0 { hg.Buckets[midIdx].Count = 0 } + found = true break } + if found { + for midIdx++; midIdx <= hg.Len()-1; midIdx++ { + hg.Buckets[midIdx].Count -= int64(valCntPairs.Count) + if hg.Buckets[midIdx].Count < 0 { + hg.Buckets[midIdx].Count = 0 + } + } + } } // RemoveVals remove the given values from the histogram. 
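The `BinarySearchRemoveVal` hunk above fixes the binary-search direction and, once the containing bucket is found, also subtracts the removed count from every later bucket. That tail subtraction is needed because histogram bucket counts are cumulative: each bucket's `Count` includes all rows up to and including that bucket. A simplified sketch under that assumption:

```go
package main

import "fmt"

// bucket mimics a histogram bucket with a cumulative count: count
// includes all rows in this bucket and every earlier one, which is
// why a removal must also be applied to all later buckets.
type bucket struct {
	lower, upper int64
	count        int64 // cumulative
}

func removeVal(buckets []bucket, val, n int64) {
	lo, hi := 0, len(buckets)-1
	found := -1
	for lo <= hi {
		mid := (lo + hi) / 2
		switch {
		case val < buckets[mid].lower:
			hi = mid - 1 // value sorts before this bucket: search left
		case val > buckets[mid].upper:
			lo = mid + 1 // value sorts after this bucket: search right
		default:
			found = mid
		}
		if found >= 0 {
			break
		}
	}
	if found < 0 {
		return
	}
	// Subtract from the containing bucket and every later bucket,
	// since counts are cumulative; clamp at zero like the real code.
	for i := found; i < len(buckets); i++ {
		buckets[i].count -= n
		if buckets[i].count < 0 {
			buckets[i].count = 0
		}
	}
}

func main() {
	b := []bucket{{1, 10, 20}, {11, 20, 30}, {21, 30, 45}}
	removeVal(b, 15, 5)
	fmt.Println(b) // [{1 10 20} {11 20 25} {21 30 40}]
}
```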
@@ -821,7 +832,7 @@ func MergeHistograms(sc *stmtctx.StatementContext, lh *Histogram, rh *Histogram, } lh.NDV += rh.NDV lLen := lh.Len() - cmp, err := lh.GetUpper(lLen-1).Compare(sc, rh.GetLower(0), collate.GetBinaryCollator()) + cmp, err := lh.GetUpper(lLen-1).Compare(sc.TypeCtx(), rh.GetLower(0), collate.GetBinaryCollator()) if err != nil { return nil, errors.Trace(err) } @@ -908,7 +919,14 @@ func (hg *Histogram) OutOfRange(val types.Datum) bool { │ │ lDatum rDatum */ -func (hg *Histogram) OutOfRangeRowCount(sctx sessionctx.Context, lDatum, rDatum *types.Datum, modifyCount, histNDV int64) (result float64) { +// The percentage of shaded area on the left side calculation formula is: +// leftPercent = (math.Pow(actualR-boundL, 2) - math.Pow(actualL-boundL, 2)) / math.Pow(histWidth, 2) +// You can find more details at https://github.com/pingcap/tidb/pull/47966#issuecomment-1778866876 +func (hg *Histogram) OutOfRangeRowCount( + sctx sessionctx.Context, + lDatum, rDatum *types.Datum, + modifyCount, histNDV int64, +) (result float64) { debugTrace := sctx.GetSessionVars().StmtCtx.EnableOptimizerDebugTrace if debugTrace { debugtrace.EnterContextCommon(sctx) @@ -1236,7 +1254,7 @@ func mergeBucketNDV(sc *stmtctx.StatementContext, left *bucket4Merging, right *b res.NDV = left.NDV return &res, nil } - upperCompare, err := right.upper.Compare(sc, left.upper, collate.GetBinaryCollator()) + upperCompare, err := right.upper.Compare(sc.TypeCtx(), left.upper, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1250,7 +1268,7 @@ func mergeBucketNDV(sc *stmtctx.StatementContext, left *bucket4Merging, right *b // ___left__| // They have the same upper. if upperCompare == 0 { - lowerCompare, err := right.lower.Compare(sc, left.lower, collate.GetBinaryCollator()) + lowerCompare, err := right.lower.Compare(sc.TypeCtx(), left.lower, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1281,7 +1299,7 @@ func mergeBucketNDV(sc *stmtctx.StatementContext, left *bucket4Merging, right *b // ____right___| // ____left__| // right.upper > left.upper - lowerCompareUpper, err := right.lower.Compare(sc, left.upper, collate.GetBinaryCollator()) + lowerCompareUpper, err := right.lower.Compare(sc.TypeCtx(), left.upper, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1298,7 +1316,7 @@ func mergeBucketNDV(sc *stmtctx.StatementContext, left *bucket4Merging, right *b return &res, nil } upperRatio := calcFraction4Datums(right.lower, right.upper, left.upper) - lowerCompare, err := right.lower.Compare(sc, left.lower, collate.GetBinaryCollator()) + lowerCompare, err := right.lower.Compare(sc.TypeCtx(), left.lower, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1352,7 +1370,7 @@ func mergePartitionBuckets(sc *stmtctx.StatementContext, buckets []*bucket4Mergi for i := len(buckets) - 1; i >= 0; i-- { totNDV += buckets[i].NDV res.Count += buckets[i].Count - compare, err := buckets[i].upper.Compare(sc, res.upper, collate.GetBinaryCollator()) + compare, err := buckets[i].upper.Compare(sc.TypeCtx(), res.upper, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1408,7 +1426,7 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog continue } tmpValue := hist.GetLower(0) - res, err := tmpValue.Compare(sc, minValue, collate.GetBinaryCollator()) + res, err := tmpValue.Compare(sc.TypeCtx(), minValue, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1437,7 +1455,7 @@ func MergePartitionHist2GlobalHist(sc 
*stmtctx.StatementContext, hists []*Histog minValue = d.Clone() continue } - res, err := d.Compare(sc, minValue, collate.GetBinaryCollator()) + res, err := d.Compare(sc.TypeCtx(), minValue, collate.GetBinaryCollator()) if err != nil { return nil, err } @@ -1462,14 +1480,14 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog var sortError error slices.SortFunc(buckets, func(i, j *bucket4Merging) int { - res, err := i.upper.Compare(sc, j.upper, collate.GetBinaryCollator()) + res, err := i.upper.Compare(sc.TypeCtx(), j.upper, collate.GetBinaryCollator()) if err != nil { sortError = err } if res != 0 { return res } - res, err = i.lower.Compare(sc, j.lower, collate.GetBinaryCollator()) + res, err = i.lower.Compare(sc.TypeCtx(), j.lower, collate.GetBinaryCollator()) if err != nil { sortError = err } @@ -1489,7 +1507,7 @@ func MergePartitionHist2GlobalHist(sc *stmtctx.StatementContext, hists []*Histog bucketNDV += buckets[i].NDV if sum >= totCount*bucketCount/expBucketNumber && sum-prevSum >= gBucketCountThreshold { for ; i > 0; i-- { // if the buckets have the same upper, we merge them into the same new buckets. - res, err := buckets[i-1].upper.Compare(sc, buckets[i].upper, collate.GetBinaryCollator()) + res, err := buckets[i-1].upper.Compare(sc.TypeCtx(), buckets[i].upper, collate.GetBinaryCollator()) if err != nil { return nil, err } diff --git a/pkg/statistics/histogram_test.go b/pkg/statistics/histogram_test.go index 15da03047b8fd..2bcd9d93bb105 100644 --- a/pkg/statistics/histogram_test.go +++ b/pkg/statistics/histogram_test.go @@ -532,3 +532,37 @@ func TestStandardizeForV2AnalyzeIndex(t *testing.T) { fmt.Sprintf("testData[%d].inputHist:%s", i, test.inputHistToStr)) } } + +func generateData(t *testing.T) *Histogram { + var data []*bucket4Test + sumCount := int64(0) + for n := 100; n < 10000; n = n + 100 { + sumCount += 100 + data = append(data, &bucket4Test{ + lower: int64(n), + upper: int64(n + 100), + count: sumCount, + repeat: 10, + ndv: 10, + }) + } + return genHist4Test(t, data, 0) +} + +func TestVerifyHistsBinarySearchRemoveValAndRemoveVals(t *testing.T) { + data1 := generateData(t) + data2 := generateData(t) + + require.Equal(t, data1, data2) + ctx := mock.NewContext() + sc := ctx.GetSessionVars().StmtCtx + b, err := codec.EncodeKey(sc, nil, types.NewIntDatum(150)) + require.NoError(t, err) + tmp := TopNMeta{ + Encoded: b, + Count: 2, + } + data1.RemoveVals([]TopNMeta{tmp}) + data2.BinarySearchRemoveVal(tmp) + require.Equal(t, data1, data2) +} diff --git a/pkg/statistics/main_test.go b/pkg/statistics/main_test.go index 818e3bf1e46e3..0a460ffa6139c 100644 --- a/pkg/statistics/main_test.go +++ b/pkg/statistics/main_test.go @@ -128,7 +128,7 @@ func createTestStatisticsSamples(t *testing.T) *testStatisticsSamples { for i := start; i < rc.count; i += 5 { rc.data[i].SetInt64(rc.data[i].GetInt64() + 2) } - require.NoError(t, types.SortDatums(sc, rc.data)) + require.NoError(t, types.SortDatums(sc.TypeCtx(), rc.data)) s.rc = rc diff --git a/pkg/statistics/sample.go b/pkg/statistics/sample.go index c813f7e7886ff..54dc0a4892d13 100644 --- a/pkg/statistics/sample.go +++ b/pkg/statistics/sample.go @@ -68,7 +68,7 @@ func SortSampleItems(sc *stmtctx.StatementContext, items []*SampleItem) ([]*Samp var err error slices.SortStableFunc(sortedItems, func(i, j *SampleItem) int { var cmp int - cmp, err = i.Value.Compare(sc, &j.Value, collate.GetBinaryCollator()) + cmp, err = i.Value.Compare(sc.TypeCtx(), &j.Value, collate.GetBinaryCollator()) if err != nil { return -1 } 
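A recurring pattern in the histogram and sample hunks above: comparators passed to `slices.SortFunc` cannot return an error, so the code captures the first `Compare` failure in a closure variable and checks it after the sort returns. A minimal sketch of that pattern:

```go
package main

import (
	"fmt"
	"slices"
	"strconv"
)

// sortNumericStrings sorts decimal strings by numeric value. The
// comparator cannot return an error, so the first parse failure is
// captured in sortErr and checked after slices.SortFunc returns,
// mirroring how the histogram sort surfaces Datum.Compare errors.
func sortNumericStrings(vals []string) error {
	var sortErr error
	slices.SortFunc(vals, func(a, b string) int {
		x, err := strconv.Atoi(a)
		if err != nil && sortErr == nil {
			sortErr = err
		}
		y, err := strconv.Atoi(b)
		if err != nil && sortErr == nil {
			sortErr = err
		}
		return x - y
	})
	return sortErr
}

func main() {
	vals := []string{"10", "2", "33"}
	if err := sortNumericStrings(vals); err != nil {
		fmt.Println("sort failed:", err)
		return
	}
	fmt.Println(vals) // [2 10 33]
}
```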
diff --git a/pkg/statistics/scalar.go b/pkg/statistics/scalar.go index 92f3e52db8f41..b84d0933e6fa1 100644 --- a/pkg/statistics/scalar.go +++ b/pkg/statistics/scalar.go @@ -73,7 +73,7 @@ func convertDatumToScalar(value *types.Datum, commonPfxLen int) float64 { minTime = types.MinTimestamp } sc := stmtctx.NewStmtCtxWithTimeZone(types.BoundTimezone) - return float64(valueTime.Sub(sc, &minTime).Duration) + return float64(valueTime.Sub(sc.TypeCtx(), &minTime).Duration) case types.KindString, types.KindBytes: bytes := value.GetBytes() if len(bytes) <= commonPfxLen { @@ -275,19 +275,19 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type } fsp := max(lowTime.Fsp(), highTime.Fsp()) var stepSize int64 - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext if lowTime.Type() == mysql.TypeDate { stepSize = 24 * int64(time.Hour) lowTime.SetCoreTime(types.FromDate(lowTime.Year(), lowTime.Month(), lowTime.Day(), 0, 0, 0, 0)) } else { var err error - lowTime, err = lowTime.RoundFrac(sc, fsp) + lowTime, err = lowTime.RoundFrac(typeCtx, fsp) if err != nil { return nil } stepSize = int64(math.Pow10(types.MaxFsp-fsp)) * int64(time.Microsecond) } - remaining := int64(highTime.Sub(sc, &lowTime).Duration)/stepSize + 1 - int64(exclude) + remaining := int64(highTime.Sub(typeCtx, &lowTime).Duration)/stepSize + 1 - int64(exclude) // When `highTime` is much larger than `lowTime`, `remaining` may be overflowed to a negative value. if remaining <= 0 || remaining >= maxNumStep { return nil @@ -295,14 +295,14 @@ func EnumRangeValues(low, high types.Datum, lowExclude, highExclude bool) []type startValue := lowTime var err error if lowExclude { - startValue, err = lowTime.Add(sc, types.Duration{Duration: time.Duration(stepSize), Fsp: fsp}) + startValue, err = lowTime.Add(typeCtx, types.Duration{Duration: time.Duration(stepSize), Fsp: fsp}) if err != nil { return nil } } values := make([]types.Datum, 0, remaining) for i := int64(0); i < remaining; i++ { - value, err := startValue.Add(sc, types.Duration{Duration: time.Duration(i * stepSize), Fsp: fsp}) + value, err := startValue.Add(typeCtx, types.Duration{Duration: time.Duration(i * stepSize), Fsp: fsp}) if err != nil { return nil } diff --git a/pkg/statistics/scalar_test.go b/pkg/statistics/scalar_test.go index 97ed17e438a72..d83f1fbb8f47c 100644 --- a/pkg/statistics/scalar_test.go +++ b/pkg/statistics/scalar_test.go @@ -35,7 +35,7 @@ func getDecimal(value float64) *types.MyDecimal { } func getDuration(value string) types.Duration { - dur, _, _ := types.ParseDuration(nil, value, 0) + dur, _, _ := types.ParseDuration(types.DefaultStmtNoWarningContext, value, 0) return dur } diff --git a/pkg/statistics/statistics_test.go b/pkg/statistics/statistics_test.go index d126d5922e8dc..b4eb55efa9afc 100644 --- a/pkg/statistics/statistics_test.go +++ b/pkg/statistics/statistics_test.go @@ -180,11 +180,11 @@ func TestMergeHistogram(t *testing.T) { require.Equal(t, tt.bucketNum, h.Len()) require.Equal(t, tt.leftNum+tt.rightNum, int64(h.TotalRowCount())) expectLower := types.NewIntDatum(tt.leftLower) - cmp, err := h.GetLower(0).Compare(sc, &expectLower, collate.GetBinaryCollator()) + cmp, err := h.GetLower(0).Compare(sc.TypeCtx(), &expectLower, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) expectUpper := types.NewIntDatum(tt.rightLower + tt.rightNum - 1) - cmp, err = h.GetUpper(h.Len()-1).Compare(sc, &expectUpper, collate.GetBinaryCollator()) + cmp, err = 
h.GetUpper(h.Len()-1).Compare(sc.TypeCtx(), &expectUpper, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) } diff --git a/pkg/store/copr/BUILD.bazel b/pkg/store/copr/BUILD.bazel index 9ad1a7250ccac..673cbffce8ade 100644 --- a/pkg/store/copr/BUILD.bazel +++ b/pkg/store/copr/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//pkg/util/execdetails", "//pkg/util/intest", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "//pkg/util/paging", "//pkg/util/tiflash", diff --git a/pkg/store/copr/coprocessor.go b/pkg/store/copr/coprocessor.go index 770b6a3dd6546..8f5f55444897c 100644 --- a/pkg/store/copr/coprocessor.go +++ b/pkg/store/copr/coprocessor.go @@ -45,9 +45,9 @@ import ( "github.com/pingcap/tidb/pkg/store/driver/backoff" derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/store/driver/options" + util2 "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/execdetails" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/memory" "github.com/pingcap/tidb/pkg/util/paging" "github.com/pingcap/tidb/pkg/util/tracing" @@ -376,7 +376,7 @@ func buildCopTasks(bo *Backoffer, ranges *KeyRanges, opt *buildCopTaskOpt) ([]*c pagingSize = req.Paging.MinPagingSize } for i := 0; i < rLen; { - nextI := mathutil.Min(i+rangesPerTaskLimit, rLen) + nextI := min(i+rangesPerTaskLimit, rLen) hint := -1 // calculate the row count hint if hints != nil { @@ -1117,7 +1117,7 @@ func (worker *copIteratorWorker) handleTask(ctx context.Context, task *copTask, logutil.BgLogger().Error("copIteratorWork meet panic", zap.Any("r", r), zap.Stack("stack trace")) - resp := &copResponse{err: errors.Errorf("%v", r)} + resp := &copResponse{err: util2.GetRecoverError(r)} // if panic has happened, set checkOOM to false to avoid another panic. worker.sendToRespCh(resp, respCh, false) } diff --git a/pkg/store/copr/mpp.go b/pkg/store/copr/mpp.go index cd0695a3e0d9d..8998a570f67d6 100644 --- a/pkg/store/copr/mpp.go +++ b/pkg/store/copr/mpp.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/tidb/pkg/store/driver/backoff" "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/pingcap/tidb/pkg/util/tiflash" "github.com/pingcap/tidb/pkg/util/tiflashcompute" "github.com/tikv/client-go/v2/tikv" @@ -174,7 +173,7 @@ func (c *MPPClient) DispatchMPPTask(param kv.DispatchMPPTaskParam) (resp *mpp.Di } if len(realResp.RetryRegions) > 0 { - logutil.BgLogger().Info("TiFlash found " + strconv.Itoa(len(realResp.RetryRegions)) + " stale regions. Only first " + strconv.Itoa(mathutil.Min(10, len(realResp.RetryRegions))) + " regions will be logged if the log level is higher than Debug") + logutil.BgLogger().Info("TiFlash found " + strconv.Itoa(len(realResp.RetryRegions)) + " stale regions. 
Only first " + strconv.Itoa(min(10, len(realResp.RetryRegions))) + " regions will be logged if the log level is higher than Debug") for index, retry := range realResp.RetryRegions { id := tikv.NewRegionVerID(retry.Id, retry.RegionEpoch.ConfVer, retry.RegionEpoch.Version) if index < 10 || log.GetLevel() <= zap.DebugLevel { diff --git a/pkg/store/copr/region_cache.go b/pkg/store/copr/region_cache.go index 14ee9fa8357b6..cf962d9abcd96 100644 --- a/pkg/store/copr/region_cache.go +++ b/pkg/store/copr/region_cache.go @@ -27,7 +27,6 @@ import ( derr "github.com/pingcap/tidb/pkg/store/driver/error" "github.com/pingcap/tidb/pkg/store/driver/options" "github.com/pingcap/tidb/pkg/util/logutil" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/metrics" "github.com/tikv/client-go/v2/tikv" "go.uber.org/zap" @@ -202,7 +201,7 @@ func (c *RegionCache) OnSendFailForBatchRegions(bo *Backoffer, store *tikv.Store logutil.Logger(bo.GetCtx()).Info("Should not reach here, OnSendFailForBatchRegions only support TiFlash") return } - logutil.Logger(bo.GetCtx()).Info("Send fail for " + strconv.Itoa(len(regionInfos)) + " regions, will switch region peer for these regions. Only first " + strconv.Itoa(mathutil.Min(10, len(regionInfos))) + " regions will be logged if the log level is higher than Debug") + logutil.Logger(bo.GetCtx()).Info("Send fail for " + strconv.Itoa(len(regionInfos)) + " regions, will switch region peer for these regions. Only first " + strconv.Itoa(min(10, len(regionInfos))) + " regions will be logged if the log level is higher than Debug") for index, ri := range regionInfos { if ri.Meta == nil { continue diff --git a/pkg/store/gcworker/gc_worker_test.go b/pkg/store/gcworker/gc_worker_test.go index b39b7da5ad8e3..99199bfb7e57a 100644 --- a/pkg/store/gcworker/gc_worker_test.go +++ b/pkg/store/gcworker/gc_worker_test.go @@ -1163,7 +1163,7 @@ func TestResolveLockRangeMeetRegionEnlargeCausedByRegionMerge(t *testing.T) { tikvStore: s.tikvStore, scanLocks: func(_ []*txnlock.Lock, key []byte) ([]*txnlock.Lock, *tikv.KeyLocation) { // first time scan locks - region, _, _ := s.cluster.GetRegionByKey(key) + region, _, _, _ := s.cluster.GetRegionByKey(key) if region.GetId() == s.initRegion.regionID { return []*txnlock.Lock{{Key: []byte("a")}, {Key: []byte("b")}}, &tikv.KeyLocation{ diff --git a/pkg/store/helper/helper.go b/pkg/store/helper/helper.go index 18c65a15c92d5..120b83f9d24f3 100644 --- a/pkg/store/helper/helper.go +++ b/pkg/store/helper/helper.go @@ -815,7 +815,7 @@ func (h *Helper) GetRegionsInfoByRange(sk, ek []byte) (*RegionsInfo, error) { // GetRegionByKey gets regioninfo by key func (h *Helper) GetRegionByKey(k []byte) (*RegionInfo, error) { var regionInfo RegionInfo - err := h.requestPD("GetRegionByKey", "GET", fmt.Sprintf("%v/%v", pdapi.RegionKey, url.QueryEscape(string(k))), nil, ®ionInfo) + err := h.requestPD("GetRegionByKey", "GET", fmt.Sprintf("%v/%v", pdapi.RegionByKey, url.QueryEscape(string(k))), nil, ®ionInfo) return ®ionInfo, err } @@ -992,11 +992,13 @@ func (h *Helper) GetPDRegionStats(tableID int64, stats *PDRegionStats, noIndexSt startKey = codec.EncodeBytes([]byte{}, startKey) endKey = codec.EncodeBytes([]byte{}, endKey) - statURL := fmt.Sprintf("%s://%s/pd/api/v1/stats/region?start_key=%s&end_key=%s", + statURL := fmt.Sprintf("%s://%s%s", util.InternalHTTPSchema(), pdAddrs[0], - url.QueryEscape(string(startKey)), - url.QueryEscape(string(endKey))) + pdapi.RegionStatsByStartEndKey( + url.QueryEscape(string(startKey)), + url.QueryEscape(string(endKey)), + )) 
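A note on the `mathutil.Min` removals throughout these files: Go 1.21 promoted generic `min`/`max` to language builtins, which is why the `//pkg/util/mathutil` dependency can be dropped from every target that only used `Min`. A minimal standalone sketch of the replacement's behavior (variable names borrowed from `buildCopTasks` above):

package main

import "fmt"

func main() {
	// The builtin min (Go 1.21+) is generic over ordered types and
	// covers the integer call sites that previously used mathutil.Min.
	rLen, i, rangesPerTaskLimit := 100, 90, 25
	fmt.Println(min(i+rangesPerTaskLimit, rLen)) // 100
	fmt.Println(min(10, 3))                      // 3
}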
resp, err := util.InternalHTTPClient().Get(statURL) if err != nil { @@ -1026,9 +1028,10 @@ func (h *Helper) DeletePlacementRule(group string, ruleID string) error { return errors.Trace(err) } - deleteURL := fmt.Sprintf("%s://%s/pd/api/v1/config/rule/%v/%v", + deleteURL := fmt.Sprintf("%s://%s%s/%v/%v", util.InternalHTTPSchema(), pdAddrs[0], + pdapi.PlacementRule, group, ruleID, ) @@ -1061,9 +1064,10 @@ func (h *Helper) SetPlacementRule(rule placement.Rule) error { } m, _ := json.Marshal(rule) - postURL := fmt.Sprintf("%s://%s/pd/api/v1/config/rule", + postURL := fmt.Sprintf("%s://%s%s", util.InternalHTTPSchema(), pdAddrs[0], + pdapi.PlacementRule, ) buf := bytes.NewBuffer(m) resp, err := util.InternalHTTPClient().Post(postURL, "application/json", buf) @@ -1088,9 +1092,10 @@ func (h *Helper) GetGroupRules(group string) ([]placement.Rule, error) { return nil, errors.Trace(err) } - getURL := fmt.Sprintf("%s://%s/pd/api/v1/config/rules/group/%s", + getURL := fmt.Sprintf("%s://%s%s/%s", util.InternalHTTPSchema(), pdAddrs[0], + pdapi.PlacementRulesGroup, group, ) @@ -1134,9 +1139,10 @@ func (h *Helper) PostAccelerateSchedule(tableID int64) error { startKey = codec.EncodeBytes([]byte{}, startKey) endKey = codec.EncodeBytes([]byte{}, endKey) - postURL := fmt.Sprintf("%s://%s/pd/api/v1/regions/accelerate-schedule", + postURL := fmt.Sprintf("%s://%s%s", util.InternalHTTPSchema(), - pdAddrs[0]) + pdAddrs[0], + pdapi.AccelerateSchedule) input := map[string]string{ "start_key": url.QueryEscape(string(startKey)), diff --git a/pkg/store/mockstore/mockcopr/aggregate.go b/pkg/store/mockstore/mockcopr/aggregate.go index a7f1dccf63421..afb9ea6e0972f 100644 --- a/pkg/store/mockstore/mockcopr/aggregate.go +++ b/pkg/store/mockstore/mockcopr/aggregate.go @@ -286,7 +286,7 @@ func (e *streamAggExec) meetNewGroup(row [][]byte) (bool, error) { return false, errors.Trace(err) } if matched { - c, err := d.Compare(e.evalCtx.sc, &e.nextGroupByRow[i], e.groupByCollators[i]) + c, err := d.Compare(e.evalCtx.sc.TypeCtx(), &e.nextGroupByRow[i], e.groupByCollators[i]) if err != nil { return false, errors.Trace(err) } diff --git a/pkg/store/mockstore/mockcopr/topn.go b/pkg/store/mockstore/mockcopr/topn.go index 083d90d2a3876..ac53b08173db6 100644 --- a/pkg/store/mockstore/mockcopr/topn.go +++ b/pkg/store/mockstore/mockcopr/topn.go @@ -50,7 +50,7 @@ func (t *topNSorter) Less(i, j int) bool { v1 := t.rows[i].key[index] v2 := t.rows[j].key[index] - ret, err := v1.Compare(t.sc, &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) + ret, err := v1.Compare(t.sc.TypeCtx(), &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) if err != nil { t.err = errors.Trace(err) return true @@ -99,7 +99,7 @@ func (t *topNHeap) Less(i, j int) bool { v1 := t.rows[i].key[index] v2 := t.rows[j].key[index] - ret, err := v1.Compare(t.sc, &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) + ret, err := v1.Compare(t.sc.TypeCtx(), &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) if err != nil { t.err = errors.Trace(err) return true diff --git a/pkg/store/mockstore/unistore/cophandler/cop_handler_test.go b/pkg/store/mockstore/unistore/cophandler/cop_handler_test.go index 41eb69bb3e283..87aea1bd48dd6 100644 --- a/pkg/store/mockstore/unistore/cophandler/cop_handler_test.go +++ b/pkg/store/mockstore/unistore/cophandler/cop_handler_test.go @@ -352,10 +352,10 @@ func TestPointGet(t *testing.T) { // verify the returned rows value as input expectedRow := 
data.rows[handle] - eq, err := returnedRow[0].Compare(nil, &expectedRow[0], collate.GetBinaryCollator()) + eq, err := returnedRow[0].Compare(types.DefaultStmtNoWarningContext, &expectedRow[0], collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, eq) - eq, err = returnedRow[1].Compare(nil, &expectedRow[1], collate.GetBinaryCollator()) + eq, err = returnedRow[1].Compare(types.DefaultStmtNoWarningContext, &expectedRow[1], collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, eq) } diff --git a/pkg/store/mockstore/unistore/cophandler/mpp_exec.go b/pkg/store/mockstore/unistore/cophandler/mpp_exec.go index c86b690298e7f..4ab16a8bb1dbd 100644 --- a/pkg/store/mockstore/unistore/cophandler/mpp_exec.go +++ b/pkg/store/mockstore/unistore/cophandler/mpp_exec.go @@ -850,7 +850,7 @@ type joinExec struct { } func (e *joinExec) getHashKey(keyCol types.Datum) (str string, err error) { - keyCol, err = keyCol.ConvertTo(e.sc, e.comKeyTp) + keyCol, err = keyCol.ConvertTo(e.sc.TypeCtx(), e.comKeyTp) if err != nil { return str, errors.Trace(err) } @@ -1076,7 +1076,7 @@ func (e *aggExec) processAllRows() (*chunk.Chunk, error) { result := agg.GetResult(aggCtxs[i]) if e.fieldTypes[i].GetType() == mysql.TypeLonglong && result.Kind() == types.KindMysqlDecimal { var err error - result, err = result.ConvertTo(e.sc, e.fieldTypes[i]) + result, err = result.ConvertTo(e.sc.TypeCtx(), e.fieldTypes[i]) if err != nil { return nil, errors.Trace(err) } diff --git a/pkg/store/mockstore/unistore/cophandler/topn.go b/pkg/store/mockstore/unistore/cophandler/topn.go index de2798a2902b9..80f0661710b23 100644 --- a/pkg/store/mockstore/unistore/cophandler/topn.go +++ b/pkg/store/mockstore/unistore/cophandler/topn.go @@ -53,7 +53,7 @@ func (t *topNSorter) Less(i, j int) bool { v1 := t.rows[i].key[index] v2 := t.rows[j].key[index] - ret, err := v1.Compare(t.sc, &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) + ret, err := v1.Compare(t.sc.TypeCtx(), &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) if err != nil { t.err = errors.Trace(err) return true @@ -107,7 +107,7 @@ func (t *topNHeap) Less(i, j int) bool { if expression.FieldTypeFromPB(by.GetExpr().GetFieldType()).GetType() == mysql.TypeEnum { ret = cmp.Compare(v1.GetUint64(), v2.GetUint64()) } else { - ret, err = v1.Compare(t.sc, &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) + ret, err = v1.Compare(t.sc.TypeCtx(), &v2, collate.GetCollator(collate.ProtoToCollation(by.Expr.FieldType.Collate))) if err != nil { t.err = errors.Trace(err) return true diff --git a/pkg/store/mockstore/unistore/tikv/BUILD.bazel b/pkg/store/mockstore/unistore/tikv/BUILD.bazel index 793f41ef45357..978b52db23886 100644 --- a/pkg/store/mockstore/unistore/tikv/BUILD.bazel +++ b/pkg/store/mockstore/unistore/tikv/BUILD.bazel @@ -33,7 +33,6 @@ go_library( "//pkg/tablecodec", "//pkg/types", "//pkg/util/codec", - "//pkg/util/mathutil", "//pkg/util/rowcodec", "@com_github_dgryski_go_farm//:go-farm", "@com_github_gogo_protobuf//proto", diff --git a/pkg/store/mockstore/unistore/tikv/mock_region.go b/pkg/store/mockstore/unistore/tikv/mock_region.go index 9ca2d782fb4eb..41a4da282cd37 100644 --- a/pkg/store/mockstore/unistore/tikv/mock_region.go +++ b/pkg/store/mockstore/unistore/tikv/mock_region.go @@ -243,7 +243,7 @@ func (rm *MockRegionManager) GetRegion(id uint64) *metapb.Region { } // GetRegionByKey gets a region by the key. 
-func (rm *MockRegionManager) GetRegionByKey(key []byte) (region *metapb.Region, peer *metapb.Peer, buckets *metapb.Buckets) { +func (rm *MockRegionManager) GetRegionByKey(key []byte) (region *metapb.Region, peer *metapb.Peer, buckets *metapb.Buckets, downPeers []*metapb.Peer) { rm.mu.RLock() defer rm.mu.RUnlock() rm.sortedRegions.AscendGreaterOrEqual(newBtreeSearchItem(key), func(item btree.Item) bool { @@ -255,9 +255,9 @@ func (rm *MockRegionManager) GetRegionByKey(key []byte) (region *metapb.Region, return false }) if region == nil || !rm.regionContainsKey(region, key) { - return nil, nil, nil + return nil, nil, nil, nil } - return proto.Clone(region).(*metapb.Region), proto.Clone(region.Peers[0]).(*metapb.Peer), nil + return proto.Clone(region).(*metapb.Region), proto.Clone(region.Peers[0]).(*metapb.Peer), nil, nil } // GetRegionByEndKey gets a region by the end key. @@ -722,8 +722,8 @@ func (pd *MockPD) GetStore(ctx context.Context, storeID uint64) (*metapb.Store, // GetRegion implements gRPC PDServer. func (pd *MockPD) GetRegion(ctx context.Context, key []byte, opts ...pdclient.GetRegionOption) (*pdclient.Region, error) { - r, p, b := pd.rm.GetRegionByKey(key) - return &pdclient.Region{Meta: r, Leader: p, Buckets: b}, nil + r, p, b, d := pd.rm.GetRegionByKey(key) + return &pdclient.Region{Meta: r, Leader: p, Buckets: b, DownPeers: d}, nil } // GetRegionByID implements gRPC PDServer. diff --git a/pkg/store/mockstore/unistore/tikv/write.go b/pkg/store/mockstore/unistore/tikv/write.go index 0d40dd4bb1c80..722eb6fa870d8 100644 --- a/pkg/store/mockstore/unistore/tikv/write.go +++ b/pkg/store/mockstore/unistore/tikv/write.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/tidb/pkg/store/mockstore/unistore/lockstore" "github.com/pingcap/tidb/pkg/store/mockstore/unistore/tikv/dbreader" "github.com/pingcap/tidb/pkg/store/mockstore/unistore/tikv/mvcc" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -332,7 +331,7 @@ func (writer *dbWriter) collectRangeKeys(it *badger.Iterator, startKey, endKey [ func (writer *dbWriter) deleteKeysInBatch(latchHandle mvcc.LatchHandle, keys []y.Key, batchSize int) error { for len(keys) > 0 { - batchSize := mathutil.Min(len(keys), batchSize) + batchSize := min(len(keys), batchSize) batchKeys := keys[:batchSize] keys = keys[batchSize:] hashVals := userKeysToHashVals(batchKeys...) diff --git a/pkg/table/OWNERS b/pkg/table/OWNERS new file mode 100644 index 0000000000000..76bd65b26fb7f --- /dev/null +++ b/pkg/table/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-table diff --git a/pkg/table/column.go b/pkg/table/column.go index c7e7a8469c055..1b3a6a5eaf7bd 100644 --- a/pkg/table/column.go +++ b/pkg/table/column.go @@ -295,7 +295,7 @@ func handleZeroDatetime(ctx sessionctx.Context, col *model.ColumnInfo, casted ty // TODO: change the third arg to TypeField. Not pass ColumnInfo. func CastValue(ctx sessionctx.Context, val types.Datum, col *model.ColumnInfo, returnErr, forceIgnoreTruncate bool) (casted types.Datum, err error) { sc := ctx.GetSessionVars().StmtCtx - casted, err = val.ConvertTo(sc, &col.FieldType) + casted, err = val.ConvertTo(sc.TypeCtx(), &col.FieldType) // TODO: make sure all truncate errors are handled by ConvertTo. if returnErr && err != nil { return casted, err @@ -713,7 +713,7 @@ func FillVirtualColumnValue(virtualRetTypes []*types.FieldType, virtualColumnInd } // Clip to zero if get negative value after cast to unsigned. 
- if mysql.HasUnsignedFlag(colInfos[idx].FieldType.GetFlag()) && !castDatum.IsNull() && !sctx.GetSessionVars().StmtCtx.TypeFlags().ClipNegativeToZero() { + if mysql.HasUnsignedFlag(colInfos[idx].FieldType.GetFlag()) && !castDatum.IsNull() && sctx.GetSessionVars().StmtCtx.TypeFlags().AllowNegativeToUnsigned() { switch datum.Kind() { case types.KindInt64: if datum.GetInt64() < 0 { diff --git a/pkg/table/column_test.go b/pkg/table/column_test.go index 48caa4257539d..c951fdf60e79f 100644 --- a/pkg/table/column_test.go +++ b/pkg/table/column_test.go @@ -132,7 +132,7 @@ func TestHandleBadNull(t *testing.T) { d := types.Datum{} err := col.HandleBadNull(&d, sc, 0) require.NoError(t, err) - cmp, err := d.Compare(sc, &types.Datum{}, collate.GetBinaryCollator()) + cmp, err := d.Compare(sc.TypeCtx(), &types.Datum{}, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) @@ -262,7 +262,7 @@ func TestGetZeroValue(t *testing.T) { colInfo := &model.ColumnInfo{FieldType: *tt.ft} zv := GetZeroValue(colInfo) require.Equal(t, tt.value.Kind(), zv.Kind()) - cmp, err := zv.Compare(sc, &tt.value, collate.GetCollator(tt.ft.GetCollate())) + cmp, err := zv.Compare(sc.TypeCtx(), &tt.value, collate.GetCollator(tt.ft.GetCollate())) require.NoError(t, err) require.Equal(t, 0, cmp) }) diff --git a/pkg/table/tables/mutation_checker.go b/pkg/table/tables/mutation_checker.go index 3ecb0b39080c8..3ef2c42730b1e 100644 --- a/pkg/table/tables/mutation_checker.go +++ b/pkg/table/tables/mutation_checker.go @@ -300,7 +300,7 @@ func checkRowInsertionConsistency( for columnID, decodedDatum := range decodedData { inputDatum := rowToInsert[columnIDToInfo[columnID].Offset] - cmp, err := decodedDatum.Compare(sessVars.StmtCtx, &inputDatum, collate.GetCollator(decodedDatum.Collation())) + cmp, err := decodedDatum.Compare(sessVars.StmtCtx.TypeCtx(), &inputDatum, collate.GetCollator(decodedDatum.Collation())) if err != nil { return errors.Trace(err) } @@ -398,7 +398,7 @@ func CompareIndexAndVal(sctx *stmtctx.StatementContext, rowVal types.Datum, idxV count := bj.GetElemCount() for elemIdx := 0; elemIdx < count; elemIdx++ { jsonDatum := types.NewJSONDatum(bj.ArrayGetElem(elemIdx)) - cmpRes, err = jsonDatum.Compare(sctx, &idxVal, collate.GetBinaryCollator()) + cmpRes, err = jsonDatum.Compare(sctx.TypeCtx(), &idxVal, collate.GetBinaryCollator()) if err != nil { return 0, errors.Trace(err) } @@ -407,7 +407,7 @@ func CompareIndexAndVal(sctx *stmtctx.StatementContext, rowVal types.Datum, idxV } } } else { - cmpRes, err = idxVal.Compare(sctx, &rowVal, collator) + cmpRes, err = idxVal.Compare(sctx.TypeCtx(), &rowVal, collator) } return cmpRes, err } diff --git a/pkg/table/tables/mutation_checker_test.go b/pkg/table/tables/mutation_checker_test.go index bca9cbb53640d..dce5dd12cb743 100644 --- a/pkg/table/tables/mutation_checker_test.go +++ b/pkg/table/tables/mutation_checker_test.go @@ -238,7 +238,7 @@ func TestCheckIndexKeysAndCheckHandleConsistency(t *testing.T) { types.NewStringDatum("some string"), types.NewTimeDatum(now), } - anotherTime, err := now.Add(sessVars.StmtCtx, types.NewDuration(24, 0, 0, 0, 0)) + anotherTime, err := now.Add(sessVars.StmtCtx.TypeCtx(), types.NewDuration(24, 0, 0, 0, 0)) require.Nil(t, err) rowToRemove := []types.Datum{ types.NewStringDatum("old string"), diff --git a/pkg/table/tables/partition.go b/pkg/table/tables/partition.go index a7af6b55b5239..53178d07decdb 100644 --- a/pkg/table/tables/partition.go +++ b/pkg/table/tables/partition.go @@ -1103,7 +1103,7 @@ func (lp *ForListColumnPruning) 
genConstExprKey(ctx sessionctx.Context, sc *stmt } func (lp *ForListColumnPruning) genKey(sc *stmtctx.StatementContext, v types.Datum) ([]byte, error) { - v, err := v.ConvertTo(sc, lp.valueTp) + v, err := v.ConvertTo(sc.TypeCtx(), lp.valueTp) if err != nil { return nil, errors.Trace(err) } @@ -1456,7 +1456,7 @@ func (t *partitionedTable) locateHashPartition(ctx sessionctx.Context, partExpr data = r[col.Index] default: var err error - data, err = r[col.Index].ConvertTo(ctx.GetSessionVars().StmtCtx, types.NewFieldType(mysql.TypeLong)) + data, err = r[col.Index].ConvertTo(ctx.GetSessionVars().StmtCtx.TypeCtx(), types.NewFieldType(mysql.TypeLong)) if err != nil { return 0, err } diff --git a/pkg/tablecodec/OWNERS b/pkg/tablecodec/OWNERS new file mode 100644 index 0000000000000..76bd65b26fb7f --- /dev/null +++ b/pkg/tablecodec/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-table diff --git a/pkg/tablecodec/tablecodec_test.go b/pkg/tablecodec/tablecodec_test.go index 1d2997367f666..b3e133f74f159 100644 --- a/pkg/tablecodec/tablecodec_test.go +++ b/pkg/tablecodec/tablecodec_test.go @@ -117,7 +117,7 @@ func TestRowCodec(t *testing.T) { for i, col := range cols { v, ok := r[col.id] require.True(t, ok) - equal, err1 := v.Compare(sc, &row[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row[i], collate.GetBinaryCollator()) require.NoError(t, err1) require.Equalf(t, 0, equal, "expect: %v, got %v", row[i], v) } @@ -131,7 +131,7 @@ func TestRowCodec(t *testing.T) { for i, col := range cols { v, ok := r[col.id] require.True(t, ok) - equal, err1 := v.Compare(sc, &row[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row[i], collate.GetBinaryCollator()) require.NoError(t, err1) require.Equal(t, 0, equal) } @@ -149,7 +149,7 @@ func TestRowCodec(t *testing.T) { } v, ok := r[col.id] require.True(t, ok) - equal, err1 := v.Compare(sc, &row[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row[i], collate.GetBinaryCollator()) require.NoError(t, err1) require.Equal(t, 0, equal) } @@ -177,7 +177,7 @@ func TestDecodeColumnValue(t *testing.T) { tp := types.NewFieldType(mysql.TypeTimestamp) d1, err := DecodeColumnValue(bs, tp, sc.TimeZone()) require.NoError(t, err) - cmp, err := d1.Compare(sc, &d, collate.GetBinaryCollator()) + cmp, err := d1.Compare(sc.TypeCtx(), &d, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) @@ -194,7 +194,7 @@ func TestDecodeColumnValue(t *testing.T) { tp.SetElems(elems) d1, err = DecodeColumnValue(bs, tp, sc.TimeZone()) require.NoError(t, err) - cmp, err = d1.Compare(sc, &d, collate.GetCollator(tp.GetCollate())) + cmp, err = d1.Compare(sc.TypeCtx(), &d, collate.GetCollator(tp.GetCollate())) require.NoError(t, err) require.Equal(t, 0, cmp) @@ -209,7 +209,7 @@ func TestDecodeColumnValue(t *testing.T) { tp.SetFlen(24) d1, err = DecodeColumnValue(bs, tp, sc.TimeZone()) require.NoError(t, err) - cmp, err = d1.Compare(sc, &d, collate.GetBinaryCollator()) + cmp, err = d1.Compare(sc.TypeCtx(), &d, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) @@ -223,7 +223,7 @@ func TestDecodeColumnValue(t *testing.T) { tp = types.NewFieldType(mysql.TypeEnum) d1, err = DecodeColumnValue(bs, tp, sc.TimeZone()) require.NoError(t, err) - cmp, err = d1.Compare(sc, &d, collate.GetCollator(tp.GetCollate())) + cmp, err = d1.Compare(sc.TypeCtx(), &d, collate.GetCollator(tp.GetCollate())) 
require.NoError(t, err) require.Equal(t, 0, cmp) } @@ -234,7 +234,7 @@ func TestUnflattenDatums(t *testing.T) { tps := []*types.FieldType{types.NewFieldType(mysql.TypeLonglong)} output, err := UnflattenDatums(input, tps, sc.TimeZone()) require.NoError(t, err) - cmp, err := input[0].Compare(sc, &output[0], collate.GetBinaryCollator()) + cmp, err := input[0].Compare(sc.TypeCtx(), &output[0], collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) @@ -243,7 +243,7 @@ func TestUnflattenDatums(t *testing.T) { tps[0].SetCollate("utf8mb4_unicode_ci") output, err = UnflattenDatums(input, tps, sc.TimeZone()) require.NoError(t, err) - cmp, err = input[0].Compare(sc, &output[0], collate.GetBinaryCollator()) + cmp, err = input[0].Compare(sc.TypeCtx(), &output[0], collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) require.Equal(t, "utf8mb4_unicode_ci", output[0].Collation()) @@ -260,11 +260,11 @@ func TestTimeCodec(t *testing.T) { row := make([]types.Datum, colLen) row[0] = types.NewIntDatum(100) row[1] = types.NewBytesDatum([]byte("abc")) - ts, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), + ts, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, "2016-06-23 11:30:45") require.NoError(t, err) row[2] = types.NewDatum(ts) - du, _, err := types.ParseDuration(nil, "12:59:59.999999", 6) + du, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "12:59:59.999999", 6) require.NoError(t, err) row[3] = types.NewDatum(du) @@ -292,7 +292,7 @@ func TestTimeCodec(t *testing.T) { for i, col := range cols { v, ok := r[col.id] require.True(t, ok) - equal, err1 := v.Compare(sc, &row[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row[i], collate.GetBinaryCollator()) require.Nil(t, err1) require.Equal(t, 0, equal) } diff --git a/pkg/testkit/testenv/BUILD.bazel b/pkg/testkit/testenv/BUILD.bazel index 970c503ebb732..abf9ab5e003f5 100644 --- a/pkg/testkit/testenv/BUILD.bazel +++ b/pkg/testkit/testenv/BUILD.bazel @@ -5,5 +5,4 @@ go_library( srcs = ["testenv.go"], importpath = "github.com/pingcap/tidb/pkg/testkit/testenv", visibility = ["//visibility:public"], - deps = ["//pkg/util/mathutil"], ) diff --git a/pkg/testkit/testenv/testenv.go b/pkg/testkit/testenv/testenv.go index f75668fd739cf..e0d2da4e856fb 100644 --- a/pkg/testkit/testenv/testenv.go +++ b/pkg/testkit/testenv/testenv.go @@ -16,11 +16,9 @@ package testenv import ( "runtime" - - "github.com/pingcap/tidb/pkg/util/mathutil" ) // SetGOMAXPROCSForTest sets GOMAXPROCS to 16 if it is greater than 16. func SetGOMAXPROCSForTest() { - runtime.GOMAXPROCS(mathutil.Min(16, runtime.GOMAXPROCS(0))) + runtime.GOMAXPROCS(min(16, runtime.GOMAXPROCS(0))) } diff --git a/pkg/testkit/testutil/require.go b/pkg/testkit/testutil/require.go index 02acb7d604b55..876e9ffdf10b1 100644 --- a/pkg/testkit/testutil/require.go +++ b/pkg/testkit/testutil/require.go @@ -21,7 +21,6 @@ import ( "testing" "github.com/pingcap/tidb/pkg/kv" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/collate" "github.com/stretchr/testify/require" @@ -29,8 +28,7 @@ import ( // DatumEqual verifies that the actual value is equal to the expected value. For string datum, they are compared by the binary collation. 
func DatumEqual(t testing.TB, expected, actual types.Datum, msgAndArgs ...interface{}) { - sc := stmtctx.NewStmtCtx() - res, err := actual.Compare(sc, &expected, collate.GetBinaryCollator()) + res, err := actual.Compare(types.DefaultStmtNoWarningContext, &expected, collate.GetBinaryCollator()) require.NoError(t, err, msgAndArgs) require.Zero(t, res, msgAndArgs) } diff --git a/pkg/tidb-binlog/OWNERS b/pkg/tidb-binlog/OWNERS new file mode 100644 index 0000000000000..c195022115e3a --- /dev/null +++ b/pkg/tidb-binlog/OWNERS @@ -0,0 +1,5 @@ +# See the OWNERS docs at https://go.k8s.io/owners +options: + no_parent_owners: true +approvers: + - sig-approvers-tidb-binlog diff --git a/pkg/ttl/cache/BUILD.bazel b/pkg/ttl/cache/BUILD.bazel index 839c53f1822b2..a9e6b97887d9f 100644 --- a/pkg/ttl/cache/BUILD.bazel +++ b/pkg/ttl/cache/BUILD.bazel @@ -26,7 +26,6 @@ go_library( "//pkg/util/chunk", "//pkg/util/codec", "//pkg/util/logutil", - "//pkg/util/mathutil", "@com_github_pingcap_errors//:errors", "@com_github_tikv_client_go_v2//tikv", "@org_uber_go_zap//:zap", diff --git a/pkg/ttl/cache/table.go b/pkg/ttl/cache/table.go index 1e69faef9d34b..8be33494900b0 100644 --- a/pkg/ttl/cache/table.go +++ b/pkg/ttl/cache/table.go @@ -33,7 +33,6 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/codec" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/tikv/client-go/v2/tikv" ) @@ -341,7 +340,7 @@ func (t *PhysicalTable) splitRawKeyRanges(ctx context.Context, store tikv.Storag regionsPerRange := len(regionIDs) / splitCnt oversizeCnt := len(regionIDs) % splitCnt - ranges := make([]kv.KeyRange, 0, mathutil.Min(len(regionIDs), splitCnt)) + ranges := make([]kv.KeyRange, 0, min(len(regionIDs), splitCnt)) for len(regionIDs) > 0 { startRegion, err := regionCache.LocateRegionByID(tikv.NewBackofferWithVars(ctx, 20000, nil), regionIDs[0]) diff --git a/pkg/ttl/sqlbuilder/BUILD.bazel b/pkg/ttl/sqlbuilder/BUILD.bazel index 067fc11aeed2b..936719355c613 100644 --- a/pkg/ttl/sqlbuilder/BUILD.bazel +++ b/pkg/ttl/sqlbuilder/BUILD.bazel @@ -12,8 +12,8 @@ go_library( "//pkg/parser/mysql", "//pkg/ttl/cache", "//pkg/types", - "//pkg/util/sqlexec", - "@com_github_pkg_errors//:errors", + "//pkg/util/sqlescape", + "@com_github_pingcap_errors//:errors", ], ) diff --git a/pkg/ttl/sqlbuilder/sql.go b/pkg/ttl/sqlbuilder/sql.go index bf92dafab7cbf..e1329bd268781 100644 --- a/pkg/ttl/sqlbuilder/sql.go +++ b/pkg/ttl/sqlbuilder/sql.go @@ -22,14 +22,14 @@ import ( "strings" "time" + "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/ast" "github.com/pingcap/tidb/pkg/parser/format" "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/ttl/cache" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/sqlexec" - "github.com/pkg/errors" + "github.com/pingcap/tidb/pkg/util/sqlescape" ) func writeHex(in io.Writer, d types.Datum) error { @@ -45,7 +45,7 @@ func writeDatum(restoreCtx *format.RestoreCtx, d types.Datum, ft *types.FieldTyp if mysql.HasBinaryFlag(ft.GetFlag()) { return writeHex(restoreCtx.In, d) } - _, err := fmt.Fprintf(restoreCtx.In, "'%s'", sqlexec.EscapeString(d.GetString())) + _, err := fmt.Fprintf(restoreCtx.In, "'%s'", sqlescape.EscapeString(d.GetString())) return err } expr := ast.NewValueExpr(d.GetValue(), ft.GetCharset(), ft.GetCollate()) diff --git a/pkg/types/BUILD.bazel b/pkg/types/BUILD.bazel index 1eae8bd68cc3f..416a8f81070f8 100644 --- a/pkg/types/BUILD.bazel 
+++ b/pkg/types/BUILD.bazel @@ -36,6 +36,7 @@ go_library( "overflow.go", "set.go", "time.go", + "truncate.go", ], importpath = "github.com/pingcap/tidb/pkg/types", visibility = [ @@ -50,11 +51,10 @@ go_library( "//pkg/parser/opcode", "//pkg/parser/terror", "//pkg/parser/types", - "//pkg/sessionctx/stmtctx", - "//pkg/types/context", "//pkg/util/collate", "//pkg/util/dbterror", "//pkg/util/hack", + "//pkg/util/intest", "//pkg/util/kvcache", "//pkg/util/logutil", "//pkg/util/mathutil", @@ -75,6 +75,7 @@ go_test( "binary_literal_test.go", "compare_test.go", "const_test.go", + "context_test.go", "convert_test.go", "core_time_test.go", "datum_test.go", @@ -103,11 +104,9 @@ go_test( "//pkg/parser/charset", "//pkg/parser/mysql", "//pkg/parser/terror", - "//pkg/sessionctx/stmtctx", "//pkg/testkit/testsetup", "//pkg/util/collate", "//pkg/util/hack", - "//pkg/util/mock", "@com_github_pingcap_errors//:errors", "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", diff --git a/pkg/types/binary_literal_test.go b/pkg/types/binary_literal_test.go index 44ea4ad7d070f..d6f16e7c874c5 100644 --- a/pkg/types/binary_literal_test.go +++ b/pkg/types/binary_literal_test.go @@ -206,7 +206,7 @@ func TestBinaryLiteral(t *testing.T) { {"0x1010ffff8080ff12", 0x1010ffff8080ff12, false}, {"0x1010ffff8080ff12ff", 0xffffffffffffffff, true}, } - ctx := DefaultNoWarningContext + ctx := DefaultStmtNoWarningContext for _, item := range tbl { hex, err := ParseHexStr(item.Input) require.NoError(t, err) diff --git a/pkg/types/compare_test.go b/pkg/types/compare_test.go index b5d76497ec05e..1e08f58cfead8 100644 --- a/pkg/types/compare_test.go +++ b/pkg/types/compare_test.go @@ -20,7 +20,6 @@ import ( "time" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/collate" "github.com/stretchr/testify/require" ) @@ -146,11 +145,10 @@ func TestCompare(t *testing.T) { } func compareForTest(a, b interface{}) (int, error) { - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) aDatum := NewDatum(a) bDatum := NewDatum(b) - return aDatum.Compare(sc, &bDatum, collate.GetBinaryCollator()) + return aDatum.Compare(ctx, &bDatum, collate.GetBinaryCollator()) } func TestCompareDatum(t *testing.T) { @@ -168,14 +166,13 @@ func TestCompareDatum(t *testing.T) { {Datum{}, MinNotNullDatum(), -1}, {MinNotNullDatum(), MaxValueDatum(), -1}, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) for i, tt := range cmpTbl { - ret, err := tt.lhs.Compare(sc, &tt.rhs, collate.GetBinaryCollator()) + ret, err := tt.lhs.Compare(ctx, &tt.rhs, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, tt.ret, ret, "%d %v %v", i, tt.lhs, tt.rhs) - ret, err = tt.rhs.Compare(sc, &tt.lhs, collate.GetBinaryCollator()) + ret, err = tt.rhs.Compare(ctx, &tt.lhs, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, -tt.ret, ret, "%d %v %v", i, tt.lhs, tt.rhs) } diff --git a/pkg/types/context.go b/pkg/types/context.go index 0182804aff83f..b969e4801f553 100644 --- a/pkg/types/context.go +++ b/pkg/types/context.go @@ -15,24 +15,238 @@ package types import ( - "github.com/pingcap/tidb/pkg/types/context" + "time" + + "github.com/pingcap/tidb/pkg/util/intest" ) -// TODO: move a contents in 
`types/context/context.go` to this file after refactor finished. -// Because package `types` has a dependency on `sessionctx/stmtctx`, we need a separate package `type/context` to define -// context objects during refactor works. +// StrictFlags is a Flags value with all flags unset, which gives the most strict behavior. +const StrictFlags Flags = 0 -// Context is an alias of `context.Context` -type Context = context.Context +// Flags indicates how to handle the conversion of a value. +type Flags uint16 -// Flags is an alias of `Flags` -type Flags = context.Flags +const ( + // FlagIgnoreTruncateErr indicates to ignore the truncate error. + // If this flag is set, `FlagTruncateAsWarning` will be ignored. + FlagIgnoreTruncateErr Flags = 1 << iota + // FlagTruncateAsWarning indicates to append the truncate error to warnings instead of returning it to user. + FlagTruncateAsWarning + // FlagAllowNegativeToUnsigned indicates to allow the casting from negative to unsigned int. + // When this flag is not set (the default), casting a negative value to unsigned results in an overflow error. + // Otherwise, a negative value will be cast to the corresponding unsigned value without any error. + // For example, when casting -1 to an unsigned bigint with `FlagAllowNegativeToUnsigned` set, + // we will get `18446744073709551615` which is the biggest unsigned value. + FlagAllowNegativeToUnsigned + // FlagIgnoreZeroDateErr indicates to ignore the zero-date error. + // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_date for details about the "zero-date" error. + // If this flag is set, `FlagZeroDateAsWarning` will be ignored. + // + // TODO: `FlagIgnoreZeroDateErr` and `FlagZeroDateAsWarning` don't fully match these comments right now, because the + // errors related with `time` and `duration` are handled directly according to SQL mode in many places (expression, + // ddl ...). This error handling will be refined in the future. Currently, the `FlagZeroDateAsWarning` is not used, + // and the `FlagIgnoreZeroDateErr` is used to allow or disallow casting zero to date in `alter` statements. See #25728 + // This flag is the reverse of `NoZeroDate` in #30507. It's set to `true` for most contexts, and is only set to + // `false` for `alter` (and `create`) statements. + FlagIgnoreZeroDateErr + // FlagIgnoreZeroInDateErr indicates to ignore the zero-in-date error. + // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_in_date for details about the "zero-in-date" error. + FlagIgnoreZeroInDateErr + // FlagIgnoreInvalidDateErr indicates to ignore the invalid-date error. + // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_allow_invalid_dates for details about the "invalid-date" error. + FlagIgnoreInvalidDateErr + // FlagSkipASCIICheck indicates to skip the ASCII check when converting the value to an ASCII string. + FlagSkipASCIICheck + // FlagSkipUTF8Check indicates to skip the UTF8 check when converting the value to an UTF8MB3 string. + FlagSkipUTF8Check + // FlagSkipUTF8MB4Check indicates to skip the UTF8MB4 check when converting the value to an UTF8 string. + FlagSkipUTF8MB4Check +) -// StrictFlags is a flags with a fields unset and has the most strict behavior. 
-const StrictFlags = context.StrictFlags +// AllowNegativeToUnsigned indicates whether the flag `FlagAllowNegativeToUnsigned` is set +func (f Flags) AllowNegativeToUnsigned() bool { + return f&FlagAllowNegativeToUnsigned != 0 +} + +// WithAllowNegativeToUnsigned returns a new flags with `FlagAllowNegativeToUnsigned` set/unset according to the clip parameter +func (f Flags) WithAllowNegativeToUnsigned(clip bool) Flags { + if clip { + return f | FlagAllowNegativeToUnsigned + } + return f &^ FlagAllowNegativeToUnsigned +} + +// SkipASCIICheck indicates whether the flag `FlagSkipASCIICheck` is set +func (f Flags) SkipASCIICheck() bool { + return f&FlagSkipASCIICheck != 0 +} + +// WithSkipSACIICheck returns a new flags with `FlagSkipASCIICheck` set/unset according to the skip parameter +func (f Flags) WithSkipSACIICheck(skip bool) Flags { + if skip { + return f | FlagSkipASCIICheck + } + return f &^ FlagSkipASCIICheck +} + +// SkipUTF8Check indicates whether the flag `FlagSkipUTF8Check` is set +func (f Flags) SkipUTF8Check() bool { + return f&FlagSkipUTF8Check != 0 +} + +// WithSkipUTF8Check returns a new flags with `FlagSkipUTF8Check` set/unset according to the skip parameter +func (f Flags) WithSkipUTF8Check(skip bool) Flags { + if skip { + return f | FlagSkipUTF8Check + } + return f &^ FlagSkipUTF8Check +} + +// SkipUTF8MB4Check indicates whether the flag `FlagSkipUTF8MB4Check` is set +func (f Flags) SkipUTF8MB4Check() bool { + return f&FlagSkipUTF8MB4Check != 0 +} + +// WithSkipUTF8MB4Check returns a new flags with `FlagSkipUTF8MB4Check` set/unset according to the skip parameter +func (f Flags) WithSkipUTF8MB4Check(skip bool) Flags { + if skip { + return f | FlagSkipUTF8MB4Check + } + return f &^ FlagSkipUTF8MB4Check +} + +// IgnoreTruncateErr indicates whether the flag `FlagIgnoreTruncateErr` is set +func (f Flags) IgnoreTruncateErr() bool { + return f&FlagIgnoreTruncateErr != 0 +} + +// WithIgnoreTruncateErr returns a new flags with `FlagIgnoreTruncateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreTruncateErr(ignore bool) Flags { + if ignore { + return f | FlagIgnoreTruncateErr + } + return f &^ FlagIgnoreTruncateErr +} + +// TruncateAsWarning indicates whether the flag `FlagTruncateAsWarning` is set +func (f Flags) TruncateAsWarning() bool { + return f&FlagTruncateAsWarning != 0 +} + +// WithTruncateAsWarning returns a new flags with `FlagTruncateAsWarning` set/unset according to the warn parameter +func (f Flags) WithTruncateAsWarning(warn bool) Flags { + if warn { + return f | FlagTruncateAsWarning + } + return f &^ FlagTruncateAsWarning +} + +// IgnoreZeroInDate indicates whether the flag `FlagIgnoreZeroInDateErr` is set +func (f Flags) IgnoreZeroInDate() bool { + return f&FlagIgnoreZeroInDateErr != 0 +} + +// WithIgnoreZeroInDate returns a new flags with `FlagIgnoreZeroInDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreZeroInDate(ignore bool) Flags { + if ignore { + return f | FlagIgnoreZeroInDateErr + } + return f &^ FlagIgnoreZeroInDateErr +} + +// IgnoreInvalidDateErr indicates whether the flag `FlagIgnoreInvalidDateErr` is set +func (f Flags) IgnoreInvalidDateErr() bool { + return f&FlagIgnoreInvalidDateErr != 0 +} + +// WithIgnoreInvalidDateErr returns a new flags with `FlagIgnoreInvalidDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreInvalidDateErr(ignore bool) Flags { + if ignore { + return f | FlagIgnoreInvalidDateErr + } + return f &^ FlagIgnoreInvalidDateErr +} + +// IgnoreZeroDateErr 
indicates whether the flag `FlagIgnoreZeroDateErr` is set +func (f Flags) IgnoreZeroDateErr() bool { + return f&FlagIgnoreZeroDateErr != 0 +} + +// WithIgnoreZeroDateErr returns a new flags with `FlagIgnoreZeroDateErr` set/unset according to the ignore parameter +func (f Flags) WithIgnoreZeroDateErr(ignore bool) Flags { + if ignore { + return f | FlagIgnoreZeroDateErr + } + return f &^ FlagIgnoreZeroDateErr +} + +// Context provides the information when converting between different types. +type Context struct { + flags Flags + loc *time.Location + appendWarningFn func(err error) +} // NewContext creates a new `Context` -var NewContext = context.NewContext +func NewContext(flags Flags, loc *time.Location, appendWarningFn func(err error)) Context { + intest.Assert(loc != nil && appendWarningFn != nil) + return Context{ + flags: flags, + loc: loc, + appendWarningFn: appendWarningFn, + } +} + +// Flags returns the flags of the context +func (c *Context) Flags() Flags { + return c.flags +} + +// WithFlags returns a new context with the flags set to the given value +func (c *Context) WithFlags(f Flags) Context { + ctx := *c + ctx.flags = f + return ctx +} + +// WithLocation returns a new context with the given location +func (c *Context) WithLocation(loc *time.Location) Context { + intest.Assert(loc) + ctx := *c + ctx.loc = loc + return ctx +} + +// Location returns the location of the context +func (c *Context) Location() *time.Location { + intest.Assert(c.loc) + if c.loc == nil { + // c.loc should never be nil; this branch just keeps the code safe. + return time.UTC + } + return c.loc +} + +// AppendWarning appends the error to the warnings. If the inner `appendWarningFn` is nil, do nothing. +func (c *Context) AppendWarning(err error) { + intest.Assert(c.appendWarningFn != nil) + if fn := c.appendWarningFn; fn != nil { + // appendWarningFn should never be nil; the fn != nil check just keeps the code safe. + fn(err) + } +} + +// AppendWarningFunc returns the inner `appendWarningFn` +func (c *Context) AppendWarningFunc() func(err error) { + return c.appendWarningFn +} + +// DefaultStmtFlags is the default flags for statement context with the flag `FlagAllowNegativeToUnsigned` set. +// TODO: make DefaultStmtFlags equal to StrictFlags; setting the flag `FlagAllowNegativeToUnsigned` +// is only to keep the code equivalent to the old implementation during refactoring. +const DefaultStmtFlags = StrictFlags | FlagAllowNegativeToUnsigned | FlagIgnoreZeroDateErr -// DefaultNoWarningContext is an alias of `DefaultNoWarningContext` -var DefaultNoWarningContext = context.DefaultNoWarningContext +// DefaultStmtNoWarningContext is the context with the default statement flags and no other special configuration +var DefaultStmtNoWarningContext = NewContext(DefaultStmtFlags, time.UTC, func(_ error) { + // the error is ignored +}) diff --git a/pkg/types/context/context.go b/pkg/types/context/context.go deleted file mode 100644 index 59497437e52d8..0000000000000 --- a/pkg/types/context/context.go +++ /dev/null @@ -1,210 +0,0 @@ -// Copyright 2023 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
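The `Context` methods above use value semantics: `WithFlags` and `WithLocation` return a modified copy rather than mutating the receiver. A small usage sketch against this API (it assumes the tidb module is importable; expected output is noted in comments):

package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	var warnings []error
	ctx := types.NewContext(types.DefaultStmtFlags, time.UTC, func(err error) {
		warnings = append(warnings, err) // collect warnings instead of dropping them
	})
	// With* helpers copy; ctx only changes because we reassign it.
	ctx = ctx.WithFlags(ctx.Flags().WithTruncateAsWarning(true))
	fmt.Println(ctx.Flags().TruncateAsWarning()) // true
	fmt.Println(ctx.Location())                  // UTC
}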
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package context - -import ( - "time" - - "github.com/pingcap/tidb/pkg/util/intest" -) - -// StrictFlags is a flags with a fields unset and has the most strict behavior. -const StrictFlags Flags = 0 - -// Flags indicates how to handle the conversion of a value. -type Flags uint16 - -const ( - // FlagIgnoreTruncateErr indicates to ignore the truncate error. - // If this flag is set, `FlagTruncateAsWarning` will be ignored. - FlagIgnoreTruncateErr Flags = 1 << iota - // FlagTruncateAsWarning indicates to append the truncate error to warnings instead of returning it to user. - FlagTruncateAsWarning - // FlagClipNegativeToZero indicates to clip the value to zero when casting a negative value to an unsigned integer. - // When this flag is set and the clip happens, an overflow error occurs and how to handle it will be determined by flags - // `FlagIgnoreOverflowError` and `FlagOverflowAsWarning`. - FlagClipNegativeToZero - // FlagIgnoreOverflowError indicates to ignore the overflow error. - // If this flag is set, `FlagOverflowAsWarning` will be ignored. - FlagIgnoreOverflowError - // FlagOverflowAsWarning indicates to append the overflow error to warnings instead of returning it to user. - FlagOverflowAsWarning - // FlagIgnoreZeroDateErr indicates to ignore the zero-date error. - // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_date for details about the "zero-date" error. - // If this flag is set, `FlagZeroDateAsWarning` will be ignored. - FlagIgnoreZeroDateErr - // FlagZeroDateAsWarning indicates to append the zero-date error to warnings instead of returning it to user. - FlagZeroDateAsWarning - // FlagIgnoreZeroInDateErr indicates to ignore the zero-in-date error. - // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_no_zero_in_date for details about the "zero-in-date" error. - FlagIgnoreZeroInDateErr - // FlagZeroInDateAsWarning indicates to append the zero-in-date error to warnings instead of returning it to user. - FlagZeroInDateAsWarning - // FlagIgnoreInvalidDateErr indicates to ignore the invalid-date error. - // See: https://dev.mysql.com/doc/refman/8.0/en/sql-mode.html#sqlmode_allow_invalid_dates for details about the "invalid-date" error. - FlagIgnoreInvalidDateErr - // FlagInvalidDateAsWarning indicates to append the invalid-date error to warnings instead of returning it to user. - FlagInvalidDateAsWarning - // FlagSkipASCIICheck indicates to skip the ASCII check when converting the value to an ASCII string. - FlagSkipASCIICheck - // FlagSkipUTF8Check indicates to skip the UTF8 check when converting the value to an UTF8MB3 string. - FlagSkipUTF8Check - // FlagSkipUTF8MB4Check indicates to skip the UTF8MB4 check when converting the value to an UTF8 string. 
- FlagSkipUTF8MB4Check -) - -// ClipNegativeToZero indicates whether the flag `FlagClipNegativeToZero` is set -func (f Flags) ClipNegativeToZero() bool { - return f&FlagClipNegativeToZero != 0 -} - -// WithClipNegativeToZero returns a new flags with `FlagClipNegativeToZero` set/unset according to the clip parameter -func (f Flags) WithClipNegativeToZero(clip bool) Flags { - if clip { - return f | FlagClipNegativeToZero - } - return f &^ FlagClipNegativeToZero -} - -// SkipASCIICheck indicates whether the flag `FlagSkipASCIICheck` is set -func (f Flags) SkipASCIICheck() bool { - return f&FlagSkipASCIICheck != 0 -} - -// WithSkipSACIICheck returns a new flags with `FlagSkipASCIICheck` set/unset according to the skip parameter -func (f Flags) WithSkipSACIICheck(skip bool) Flags { - if skip { - return f | FlagSkipASCIICheck - } - return f &^ FlagSkipASCIICheck -} - -// SkipUTF8Check indicates whether the flag `FlagSkipUTF8Check` is set -func (f Flags) SkipUTF8Check() bool { - return f&FlagSkipUTF8Check != 0 -} - -// WithSkipUTF8Check returns a new flags with `FlagSkipUTF8Check` set/unset according to the skip parameter -func (f Flags) WithSkipUTF8Check(skip bool) Flags { - if skip { - return f | FlagSkipUTF8Check - } - return f &^ FlagSkipUTF8Check -} - -// SkipUTF8MB4Check indicates whether the flag `FlagSkipUTF8MB4Check` is set -func (f Flags) SkipUTF8MB4Check() bool { - return f&FlagSkipUTF8MB4Check != 0 -} - -// WithSkipUTF8MB4Check returns a new flags with `FlagSkipUTF8MB4Check` set/unset according to the skip parameter -func (f Flags) WithSkipUTF8MB4Check(skip bool) Flags { - if skip { - return f | FlagSkipUTF8MB4Check - } - return f &^ FlagSkipUTF8MB4Check -} - -// IgnoreTruncateErr indicates whether the flag `FlagIgnoreTruncateErr` is set -func (f Flags) IgnoreTruncateErr() bool { - return f&FlagIgnoreTruncateErr != 0 -} - -// WithIgnoreTruncateErr returns a new flags with `FlagIgnoreTruncateErr` set/unset according to the skip parameter -func (f Flags) WithIgnoreTruncateErr(ignore bool) Flags { - if ignore { - return f | FlagIgnoreTruncateErr - } - return f &^ FlagIgnoreTruncateErr -} - -// TruncateAsWarning indicates whether the flag `FlagTruncateAsWarning` is set -func (f Flags) TruncateAsWarning() bool { - return f&FlagTruncateAsWarning != 0 -} - -// WithTruncateAsWarning returns a new flags with `FlagTruncateAsWarning` set/unset according to the skip parameter -func (f Flags) WithTruncateAsWarning(warn bool) Flags { - if warn { - return f | FlagTruncateAsWarning - } - return f &^ FlagTruncateAsWarning -} - -// Context provides the information when converting between different types. 
-type Context struct { - flags Flags - loc *time.Location - appendWarningFn func(err error) -} - -// NewContext creates a new `Context` -func NewContext(flags Flags, loc *time.Location, appendWarningFn func(err error)) Context { - intest.Assert(loc != nil && appendWarningFn != nil) - return Context{ - flags: flags, - loc: loc, - appendWarningFn: appendWarningFn, - } -} - -// Flags returns the flags of the context -func (c *Context) Flags() Flags { - return c.flags -} - -// WithFlags returns a new context with the flags set to the given value -func (c *Context) WithFlags(f Flags) Context { - ctx := *c - ctx.flags = f - return ctx -} - -// WithLocation returns a new context with the given location -func (c *Context) WithLocation(loc *time.Location) Context { - intest.Assert(loc) - ctx := *c - ctx.loc = loc - return ctx -} - -// Location returns the location of the context -func (c *Context) Location() *time.Location { - intest.Assert(c.loc) - if c.loc == nil { - // c.loc should always not be nil, just make the code safe here. - return time.UTC - } - return c.loc -} - -// AppendWarning appends the error to warning. If the inner `appendWarningFn` is nil, do nothing. -func (c *Context) AppendWarning(err error) { - intest.Assert(c.appendWarningFn != nil) - if fn := c.appendWarningFn; fn != nil { - // appendWarningFn should always not be nil, check fn != nil here to just make code safe. - fn(err) - } -} - -// AppendWarningFunc returns the inner `appendWarningFn` -func (c *Context) AppendWarningFunc() func(err error) { - return c.appendWarningFn -} - -// DefaultNoWarningContext is the context without any special configuration -var DefaultNoWarningContext = NewContext(StrictFlags, time.UTC, func(_ error) { - // the error is ignored -}) diff --git a/pkg/types/context/context_test.go b/pkg/types/context_test.go similarity index 83% rename from pkg/types/context/context_test.go rename to pkg/types/context_test.go index a905c728d94af..0bd8d5cbcc811 100644 --- a/pkg/types/context/context_test.go +++ b/pkg/types/context_test.go @@ -12,10 +12,11 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package context +package types import ( "fmt" + "sync" "testing" "time" @@ -39,13 +40,13 @@ func TestSimpleOnOffFlags(t *testing.T) { writeFn func(Flags, bool) Flags }{ { - name: "FlagClipNegativeToZero", - flag: FlagClipNegativeToZero, + name: "FlagAllowNegativeToUnsigned", + flag: FlagAllowNegativeToUnsigned, readFn: func(f Flags) bool { - return f.ClipNegativeToZero() + return f.AllowNegativeToUnsigned() }, writeFn: func(f Flags, clip bool) Flags { - return f.WithClipNegativeToZero(clip) + return f.WithAllowNegativeToUnsigned(clip) }, }, { @@ -105,3 +106,26 @@ func TestSimpleOnOffFlags(t *testing.T) { require.False(t, c.readFn(f), msg) } } + +type warnStore struct { + sync.Mutex + warnings []error +} + +func (w *warnStore) AppendWarning(warn error) { + w.Lock() + defer w.Unlock() + + w.warnings = append(w.warnings, warn) +} + +func (w *warnStore) Reset() { + w.Lock() + defer w.Unlock() + + w.warnings = nil +} + +func (w *warnStore) GetWarnings() []error { + return w.warnings +} diff --git a/pkg/types/convert.go b/pkg/types/convert.go index 23a72229fd88e..7a860843d6afd 100644 --- a/pkg/types/convert.go +++ b/pkg/types/convert.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/hack" ) @@ -146,7 +145,7 @@ func ConvertUintToInt(val uint64, upperBound int64, tp byte) (int64, error) { // ConvertIntToUint converts an int value to an uint value. func ConvertIntToUint(flags Flags, val int64, upperBound uint64, tp byte) (uint64, error) { - if val < 0 && flags.ClipNegativeToZero() { + if val < 0 && !flags.AllowNegativeToUnsigned() { return 0, overflow(val, tp) } @@ -170,7 +169,7 @@ func ConvertUintToUint(val uint64, upperBound uint64, tp byte) (uint64, error) { func ConvertFloatToUint(flags Flags, fval float64, upperBound uint64, tp byte) (uint64, error) { val := RoundFloat(fval) if val < 0 { - if flags.ClipNegativeToZero() { + if !flags.AllowNegativeToUnsigned() { return 0, overflow(val, tp) } return uint64(int64(val)), overflow(val, tp) @@ -230,7 +229,7 @@ func convertScientificNotation(str string) (string, error) { } } -func convertDecimalStrToUint(sc *stmtctx.StatementContext, str string, upperBound uint64, tp byte) (uint64, error) { +func convertDecimalStrToUint(str string, upperBound uint64, tp byte) (uint64, error) { str, err := convertScientificNotation(str) if err != nil { return 0, err @@ -271,8 +270,8 @@ func convertDecimalStrToUint(sc *stmtctx.StatementContext, str string, upperBoun } // ConvertDecimalToUint converts a decimal to a uint by converting it to a string first to avoid float overflow (#10181). -func ConvertDecimalToUint(sc *stmtctx.StatementContext, d *MyDecimal, upperBound uint64, tp byte) (uint64, error) { - return convertDecimalStrToUint(sc, string(d.ToString()), upperBound, tp) +func ConvertDecimalToUint(d *MyDecimal, upperBound uint64, tp byte) (uint64, error) { + return convertDecimalStrToUint(string(d.ToString()), upperBound, tp) } // StrToInt converts a string to an integer at the best-effort. @@ -316,15 +315,15 @@ func StrToUint(ctx Context, str string, isFuncCast bool) (uint64, error) { } // StrToDateTime converts str to MySQL DateTime. 
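The `ClipNegativeToZero` to `AllowNegativeToUnsigned` rename inverts the flag's polarity: clipping used to be opt-in, while now allowing the conversion is opt-in, which is why every call site in this diff negates its test. A sketch of the check as it now reads in `ConvertIntToUint` (bounds checks are elided and the error is simplified; the real code returns `overflow(val, tp)`, and this would sit inside package `types` with "fmt" imported):

func convertIntToUintSketch(flags Flags, val int64) (uint64, error) {
	if val < 0 && !flags.AllowNegativeToUnsigned() {
		// Strict behavior: a negative source value is an overflow error.
		return 0, fmt.Errorf("value %d overflows unsigned type", val)
	}
	// Permissive behavior: reinterpret the two's-complement bits, so -1
	// becomes 18446744073709551615 for a 64-bit unsigned target.
	return uint64(val), nil
}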
-func StrToDateTime(sc *stmtctx.StatementContext, str string, fsp int) (Time, error) { - return ParseTime(sc, str, mysql.TypeDatetime, fsp, nil) +func StrToDateTime(ctx Context, str string, fsp int) (Time, error) { + return ParseTime(ctx, str, mysql.TypeDatetime, fsp, nil) } // StrToDuration converts str to Duration. It returns Duration in normal case, // and returns Time when str is in datetime format. // when isDuration is true, the d is returned, when it is false, the t is returned. // See https://dev.mysql.com/doc/refman/5.5/en/date-and-time-literals.html. -func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duration, t Time, isDuration bool, err error) { +func StrToDuration(ctx Context, str string, fsp int) (d Duration, t Time, isDuration bool, err error) { str = strings.TrimSpace(str) length := len(str) if length > 0 && str[0] == '-' { @@ -336,16 +335,15 @@ func StrToDuration(sc *stmtctx.StatementContext, str string, fsp int) (d Duratio // Timestamp format is 'YYYYMMDDHHMMSS' or 'YYMMDDHHMMSS', which length is 12. // See #3923, it explains what we do here. if length >= 12 { - t, err = StrToDateTime(sc, str, fsp) + t, err = StrToDateTime(ctx, str, fsp) if err == nil { return d, t, false, nil } } - d, _, err = ParseDuration(sc, str, fsp) + d, _, err = ParseDuration(ctx, str, fsp) if ErrTruncatedWrongVal.Equal(err) { - typeCtx := sc.TypeCtx() - err = typeCtx.HandleTruncate(err) + err = ctx.HandleTruncate(err) } return d, t, true, errors.Trace(err) } @@ -355,7 +353,7 @@ func NumberToDuration(number int64, fsp int) (Duration, error) { if number > TimeMaxValue { // Try to parse DATETIME. if number >= 10000000000 { // '2001-00-00 00-00-00' - if t, err := ParseDatetimeFromNum(nil, number); err == nil { + if t, err := ParseDatetimeFromNum(DefaultStmtNoWarningContext, number); err == nil { dur, err1 := t.ConvertToDuration() return dur, errors.Trace(err1) } @@ -576,21 +574,21 @@ func StrToFloat(ctx Context, str string, isFuncCast bool) (float64, error) { } // ConvertJSONToInt64 casts JSON into int64. -func ConvertJSONToInt64(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool) (int64, error) { - return ConvertJSONToInt(sc, j, unsigned, mysql.TypeLonglong) +func ConvertJSONToInt64(ctx Context, j BinaryJSON, unsigned bool) (int64, error) { + return ConvertJSONToInt(ctx, j, unsigned, mysql.TypeLonglong) } // ConvertJSONToInt casts JSON into int by type. 
-func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, tp byte) (int64, error) { +func ConvertJSONToInt(ctx Context, j BinaryJSON, unsigned bool, tp byte) (int64, error) { switch j.TypeCode { case JSONTypeCodeObject, JSONTypeCodeArray, JSONTypeCodeOpaque, JSONTypeCodeDate, JSONTypeCodeDatetime, JSONTypeCodeTimestamp, JSONTypeCodeDuration: - return 0, sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) + return 0, ctx.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) case JSONTypeCodeLiteral: switch j.Value[0] { case JSONLiteralFalse: return 0, nil case JSONLiteralNil: - return 0, sc.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) + return 0, ctx.HandleTruncate(ErrTruncatedWrongVal.GenWithStackByArgs("INTEGER", j.String())) default: return 1, nil } @@ -598,44 +596,46 @@ func ConvertJSONToInt(sc *stmtctx.StatementContext, j BinaryJSON, unsigned bool, i := j.GetInt64() if unsigned { uBound := IntergerUnsignedUpperBound(tp) - u, err := ConvertIntToUint(sc.TypeFlags(), i, uBound, tp) - return int64(u), sc.HandleOverflow(err, err) + u, err := ConvertIntToUint(ctx.Flags(), i, uBound, tp) + return int64(u), err } lBound := IntergerSignedLowerBound(tp) uBound := IntergerSignedUpperBound(tp) - i, err := ConvertIntToInt(i, lBound, uBound, tp) - return i, sc.HandleOverflow(err, err) + return ConvertIntToInt(i, lBound, uBound, tp) case JSONTypeCodeUint64: u := j.GetUint64() if unsigned { uBound := IntergerUnsignedUpperBound(tp) u, err := ConvertUintToUint(u, uBound, tp) - return int64(u), sc.HandleOverflow(err, err) + return int64(u), err } uBound := IntergerSignedUpperBound(tp) - i, err := ConvertUintToInt(u, uBound, tp) - return i, sc.HandleOverflow(err, err) + return ConvertUintToInt(u, uBound, tp) case JSONTypeCodeFloat64: f := j.GetFloat64() if !unsigned { lBound := IntergerSignedLowerBound(tp) uBound := IntergerSignedUpperBound(tp) u, e := ConvertFloatToInt(f, lBound, uBound, tp) - return u, sc.HandleOverflow(e, e) + return u, e } bound := IntergerUnsignedUpperBound(tp) - u, err := ConvertFloatToUint(sc.TypeFlags(), f, bound, tp) - return int64(u), sc.HandleOverflow(err, err) + u, err := ConvertFloatToUint(ctx.Flags(), f, bound, tp) + return int64(u), err case JSONTypeCodeString: str := string(hack.String(j.GetString())) - if !unsigned { - r, e := StrToInt(sc.TypeCtxOrDefault(), str, false) - return r, sc.HandleOverflow(e, e) + // The behavior of casting json string as an integer is consistent with casting a string as an integer. + // See the `builtinCastStringAsIntSig` in `expression` pkg. The only difference is that this function + // doesn't append any warning. This behavior is compatible with MySQL. 
+ isNegative := len(str) > 1 && str[0] == '-' + if !isNegative { + r, err := StrToUint(ctx, str, false) + return int64(r), err } - u, err := StrToUint(sc.TypeCtxOrDefault(), str, false) - return int64(u), sc.HandleOverflow(err, err) + + return StrToInt(ctx, str, false) } return 0, errors.New("Unknown type code in JSON") } diff --git a/pkg/types/convert_test.go b/pkg/types/convert_test.go index ac25732b989d3..fed44d5670603 100644 --- a/pkg/types/convert_test.go +++ b/pkg/types/convert_test.go @@ -25,7 +25,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/stretchr/testify/require" ) @@ -35,8 +34,7 @@ type invalidMockType struct { // Convert converts the val with type tp. func Convert(val interface{}, target *FieldType) (v interface{}, err error) { d := NewDatum(val) - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - ret, err := d.ConvertTo(sc, target) + ret, err := d.ConvertTo(DefaultStmtNoWarningContext, target) if err != nil { return ret.GetValue(), errors.Trace(err) } @@ -148,15 +146,15 @@ func TestConvertType(t *testing.T) { vv, err := Convert(v, ft) require.NoError(t, err) require.Equal(t, "10:11:12.1", vv.(Duration).String()) - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - vd, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeDatetime, 2, nil) + typeCtx := DefaultStmtNoWarningContext + vd, err := ParseTime(typeCtx, "2010-10-10 10:11:11.12345", mysql.TypeDatetime, 2, nil) require.Equal(t, "2010-10-10 10:11:11.12", vd.String()) require.NoError(t, err) v, err = Convert(vd, ft) require.NoError(t, err) require.Equal(t, "10:11:11.1", v.(Duration).String()) - vt, err := ParseTime(sc, "2010-10-10 10:11:11.12345", mysql.TypeTimestamp, 2, nil) + vt, err := ParseTime(typeCtx, "2010-10-10 10:11:11.12345", mysql.TypeTimestamp, 2, nil) require.Equal(t, "2010-10-10 10:11:11.12", vt.String()) require.NoError(t, err) v, err = Convert(vt, ft) @@ -248,11 +246,11 @@ func TestConvertType(t *testing.T) { // Test Datum.ToDecimal with bad number. 
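// After this refactor, leniency is a property of the context's type flags: the lines
// below opt out of truncation errors with WithIgnoreTruncateErr(true) instead of
// mutating a shared StatementContext.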
d := NewDatum("hello") - _, err = d.ToDecimal(sc.TypeCtxOrDefault()) + _, err = d.ToDecimal(typeCtx) require.Truef(t, terror.ErrorEqual(err, ErrTruncatedWrongVal), "err %v", err) - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - v, err = d.ToDecimal(sc.TypeCtxOrDefault()) + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreTruncateErr(true)) + v, err = d.ToDecimal(typeCtx) require.NoError(t, err) require.Equal(t, "0", v.(*MyDecimal).String()) @@ -266,7 +264,7 @@ func TestConvertType(t *testing.T) { require.Equal(t, int64(2015), v) _, err = Convert(1800, ft) require.Error(t, err) - dt, err := ParseDate(nil, "2015-11-11") + dt, err := ParseDate(DefaultStmtNoWarningContext, "2015-11-11") require.NoError(t, err) v, err = Convert(dt, ft) require.NoError(t, err) @@ -345,11 +343,11 @@ func TestConvertToString(t *testing.T) { testToString(t, Enum{Name: "a", Value: 1}, "a") testToString(t, Set{Name: "a", Value: 1}, "a") - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) require.NoError(t, err) testToString(t, t1, "2011-11-10 11:11:11.999999") - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testToString(t, td, "11:11:11.999999") @@ -383,8 +381,7 @@ func TestConvertToString(t *testing.T) { ft.SetFlen(tt.flen) ft.SetCharset(tt.charset) inputDatum := NewStringDatum(tt.input) - sc := stmtctx.NewStmtCtx() - outputDatum, err := inputDatum.ConvertTo(sc, ft) + outputDatum, err := inputDatum.ConvertTo(DefaultStmtNoWarningContext, ft) if tt.input != tt.output { require.True(t, ErrDataTooLong.Equal(err), "flen: %d, charset: %s, input: %s, output: %s", tt.flen, tt.charset, tt.input, tt.output) } else { @@ -420,10 +417,9 @@ func TestConvertToStringWithCheck(t *testing.T) { ft.SetFlen(255) ft.SetCharset(tt.outputChs) inputDatum := NewStringDatum(tt.input) - sc := stmtctx.NewStmtCtx() - flags := tt.newFlags(sc.TypeFlags()) - sc.SetTypeFlags(flags) - outputDatum, err := inputDatum.ConvertTo(sc, ft) + ctx := DefaultStmtNoWarningContext + ctx = ctx.WithFlags(tt.newFlags(DefaultStmtFlags)) + outputDatum, err := inputDatum.ConvertTo(ctx, ft) if len(tt.output) == 0 { require.True(t, charset.ErrInvalidCharacterString.Equal(err), tt) } else { @@ -460,8 +456,7 @@ func TestConvertToBinaryString(t *testing.T) { ft.SetFlen(255) ft.SetCharset(tt.outputCharset) inputDatum := NewCollationStringDatum(tt.input, tt.inputCollate) - sc := stmtctx.NewStmtCtx() - outputDatum, err := inputDatum.ConvertTo(sc, ft) + outputDatum, err := inputDatum.ConvertTo(DefaultStmtNoWarningContext, ft) if len(tt.output) == 0 { require.True(t, charset.ErrInvalidCharacterString.Equal(err), tt) } else { @@ -472,7 +467,7 @@ func TestConvertToBinaryString(t *testing.T) { } func testStrToInt(t *testing.T, str string, expect int64, truncateAsErr bool, expectErr error) { - ctx := DefaultNoWarningContext.WithFlags(StrictFlags.WithIgnoreTruncateErr(!truncateAsErr)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(!truncateAsErr)) val, err := StrToInt(ctx, str, false) if expectErr != nil { require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err) @@ -483,7 +478,7 @@ func testStrToInt(t *testing.T, str string, expect int64, truncateAsErr bool, ex } func testStrToUint(t *testing.T, str string, expect uint64, 
truncateAsErr bool, expectErr error) { - ctx := DefaultNoWarningContext.WithFlags(StrictFlags.WithIgnoreTruncateErr(!truncateAsErr)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(!truncateAsErr)) val, err := StrToUint(ctx, str, false) if expectErr != nil { require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err) @@ -494,7 +489,7 @@ func testStrToUint(t *testing.T, str string, expect uint64, truncateAsErr bool, } func testStrToFloat(t *testing.T, str string, expect float64, truncateAsErr bool, expectErr error) { - ctx := DefaultNoWarningContext.WithFlags(StrictFlags.WithIgnoreTruncateErr(!truncateAsErr)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(!truncateAsErr)) val, err := StrToFloat(ctx, str, false) if expectErr != nil { require.Truef(t, terror.ErrorEqual(err, expectErr), "err %v", err) @@ -555,34 +550,22 @@ func TestStrToNum(t *testing.T) { } func testSelectUpdateDeleteEmptyStringError(t *testing.T) { - testCases := []struct { - inSelect bool - inDelete bool - }{ - {true, false}, - {false, true}, - } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - for _, tc := range testCases { - sc.InSelectStmt = tc.inSelect - sc.InDeleteStmt = tc.inDelete + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithTruncateAsWarning(true)) - str := "" - expect := 0 + str := "" + expect := 0 - val, err := StrToInt(sc.TypeCtxOrDefault(), str, false) - require.NoError(t, err) - require.Equal(t, int64(expect), val) + val, err := StrToInt(ctx, str, false) + require.NoError(t, err) + require.Equal(t, int64(expect), val) - val1, err := StrToUint(sc.TypeCtxOrDefault(), str, false) - require.NoError(t, err) - require.Equal(t, uint64(expect), val1) + val1, err := StrToUint(ctx, str, false) + require.NoError(t, err) + require.Equal(t, uint64(expect), val1) - val2, err := StrToFloat(sc.TypeCtxOrDefault(), str, false) - require.NoError(t, err) - require.Equal(t, float64(expect), val2) - } + val2, err := StrToFloat(ctx, str, false) + require.NoError(t, err) + require.Equal(t, float64(expect), val2) } func TestFieldTypeToStr(t *testing.T) { @@ -600,10 +583,8 @@ func accept(t *testing.T, tp byte, value interface{}, unsigned bool, expected st ft.AddFlag(mysql.UnsignedFlag) } d := NewDatum(value) - sc := stmtctx.NewStmtCtx() - sc.SetTimeZone(time.UTC) - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - casted, err := d.ConvertTo(sc, ft) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) + casted, err := d.ConvertTo(ctx, ft) require.NoErrorf(t, err, "%v", ft) if casted.IsNull() { require.Equal(t, "", expected) @@ -628,8 +609,7 @@ func deny(t *testing.T, tp byte, value interface{}, unsigned bool, expected stri ft.AddFlag(mysql.UnsignedFlag) } d := NewDatum(value) - sc := stmtctx.NewStmtCtx() - casted, err := d.ConvertTo(sc, ft) + casted, err := d.ConvertTo(DefaultStmtNoWarningContext, ft) require.Error(t, err) if casted.IsNull() { require.Equal(t, "", expected) @@ -883,12 +863,11 @@ func TestGetValidInt(t *testing.T) { {"123e+", "123", true, true}, {"123de", "123", true, true}, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithTruncateAsWarning(true)) - sc.InSelectStmt = true + warnings := &warnStore{} + ctx := NewContext(DefaultStmtFlags.WithTruncateAsWarning(true), time.UTC, warnings.AppendWarning) warningCount := 0 for i, tt := range tests { - prefix, err := getValidIntPrefix(sc.TypeCtxOrDefault(), 
tt.origin, false) + prefix, err := getValidIntPrefix(ctx, tt.origin, false) require.NoError(t, err) require.Equal(t, tt.valid, prefix) if tt.signed { @@ -897,13 +876,13 @@ func TestGetValidInt(t *testing.T) { _, err = strconv.ParseUint(prefix, 10, 64) } require.NoError(t, err) - warnings := sc.GetWarnings() + warn := warnings.GetWarnings() if tt.warning { - require.Lenf(t, warnings, warningCount+1, "%d", i) - require.True(t, terror.ErrorEqual(warnings[len(warnings)-1].Err, ErrTruncatedWrongVal)) + require.Lenf(t, warn, warningCount+1, "%d", i) + require.True(t, terror.ErrorEqual(warn[len(warn)-1], ErrTruncatedWrongVal)) warningCount++ } else { - require.Len(t, warnings, warningCount) + require.Len(t, warn, warningCount) } } @@ -927,10 +906,9 @@ func TestGetValidInt(t *testing.T) { {"123e+", "123", true}, {"123de", "123", true}, } - sc.SetTypeFlags(StrictFlags) - sc.InSelectStmt = false + ctx = ctx.WithFlags(DefaultStmtFlags) for _, tt := range tests2 { - prefix, err := getValidIntPrefix(sc.TypeCtxOrDefault(), tt.origin, false) + prefix, err := getValidIntPrefix(ctx, tt.origin, false) if tt.warning { require.True(t, terror.ErrorEqual(err, ErrTruncatedWrongVal)) } else { @@ -963,7 +941,7 @@ func TestGetValidFloat(t *testing.T) { {"9-3", "9"}, {"1001001\\u0000\\u0000\\u0000", "1001001"}, } - ctx := DefaultNoWarningContext + ctx := DefaultStmtNoWarningContext for _, tt := range tests { prefix, _ := getValidFloatPrefix(ctx, tt.origin, false) require.Equal(t, tt.valid, prefix) @@ -1017,12 +995,12 @@ func TestConvertTime(t *testing.T) { } for _, timezone := range timezones { - sc := stmtctx.NewStmtCtxWithTimeZone(timezone) - testConvertTimeTimeZone(t, sc) + ctx := DefaultStmtNoWarningContext.WithLocation(timezone) + testConvertTimeTimeZone(t, ctx) } } -func testConvertTimeTimeZone(t *testing.T, sc *stmtctx.StatementContext) { +func testConvertTimeTimeZone(t *testing.T, ctx Context) { raw := FromDate(2002, 3, 4, 4, 6, 7, 8) tests := []struct { input Time @@ -1054,7 +1032,7 @@ func testConvertTimeTimeZone(t *testing.T, sc *stmtctx.StatementContext) { for _, test := range tests { var d Datum d.SetMysqlTime(test.input) - nd, err := d.ConvertTo(sc, test.target) + nd, err := d.ConvertTo(ctx, test.target) require.NoError(t, err) v := nd.GetMysqlTime() require.Equal(t, test.expect.Type(), v.Type()) @@ -1084,7 +1062,7 @@ func TestConvertJSONToInt(t *testing.T) { j, err := ParseBinaryJSONFromString(tt.in) require.NoError(t, err) - casted, err := ConvertJSONToInt64(stmtctx.NewStmtCtx(), j, false) + casted, err := ConvertJSONToInt64(DefaultStmtNoWarningContext, j, false) if tt.err { require.Error(t, err, tt) } else { @@ -1114,7 +1092,7 @@ func TestConvertJSONToFloat(t *testing.T) { {in: "123.456hello", out: 123.456, ty: JSONTypeCodeString, err: true}, {in: "1234", out: 1234, ty: JSONTypeCodeString}, } - ctx := DefaultNoWarningContext + ctx := DefaultStmtNoWarningContext for _, tt := range tests { j := CreateBinaryJSON(tt.in) require.Equal(t, tt.ty, j.TypeCode) @@ -1143,7 +1121,7 @@ func TestConvertJSONToDecimal(t *testing.T) { {in: `false`, out: NewDecFromStringForTest("0")}, {in: `null`, out: NewDecFromStringForTest("0"), err: true}, } - ctx := DefaultNoWarningContext + ctx := DefaultStmtNoWarningContext for _, tt := range tests { j, err := ParseBinaryJSONFromString(tt.in) require.NoError(t, err) @@ -1210,7 +1188,6 @@ func TestNumberToDuration(t *testing.T) { } func TestStrToDuration(t *testing.T) { - sc := stmtctx.NewStmtCtx() var tests = []struct { str string fsp int @@ -1224,7 +1201,7 @@ func 
TestStrToDuration(t *testing.T) { {"00:00:00", 0, true}, } for _, tt := range tests { - _, _, isDuration, err := StrToDuration(sc, tt.str, tt.fsp) + _, _, isDuration, err := StrToDuration(DefaultStmtNoWarningContext, tt.str, tt.fsp) require.NoError(t, err) require.Equal(t, tt.isDuration, isDuration) } } @@ -1288,7 +1265,7 @@ func TestConvertDecimalStrToUint(t *testing.T) { {"-10000000000000000000.0", 0, false}, } for _, ca := range cases { - result, err := convertDecimalStrToUint(stmtctx.NewStmtCtx(), ca.input, math.MaxUint64, 0) + result, err := convertDecimalStrToUint(ca.input, math.MaxUint64, 0) if !ca.succ { require.Error(t, err) } else { @@ -1297,11 +1274,11 @@ require.Equal(t, ca.result, result, "input=%v", ca.input) } - result, err := convertDecimalStrToUint(stmtctx.NewStmtCtx(), "-99.0", math.MaxUint8, 0) + result, err := convertDecimalStrToUint("-99.0", math.MaxUint8, 0) require.Error(t, err) require.Equal(t, uint64(0), result) - result, err = convertDecimalStrToUint(stmtctx.NewStmtCtx(), "-100.0", math.MaxUint8, 0) + result, err = convertDecimalStrToUint("-100.0", math.MaxUint8, 0) require.Error(t, err) require.Equal(t, uint64(0), result) } diff --git a/pkg/types/datum.go b/pkg/types/datum.go index ccc01697ab6f1..5d5a804eba54e 100644 --- a/pkg/types/datum.go +++ b/pkg/types/datum.go @@ -32,7 +32,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/parser/types" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/hack" "github.com/pingcap/tidb/pkg/util/logutil" @@ -630,13 +629,9 @@ func (d *Datum) SetValue(val interface{}, tp *types.FieldType) { // Compare compares the datum to another datum. // Notes: don't rely on datum.collation to get the collator; it tends to be buggy.
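// A minimal sketch of the new signature (illustrative; ctx is any types.Context):
//
//	a, b := NewIntDatum(1), NewStringDatum("2")
//	cmp, err := a.Compare(ctx, &b, collate.GetBinaryCollator()) // cmp < 0: a sorts before b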
-func (d *Datum) Compare(sc *stmtctx.StatementContext, ad *Datum, comparer collate.Collator) (int, error) { - typeCtx := DefaultNoWarningContext - if sc != nil { - typeCtx = sc.TypeCtx() - } +func (d *Datum) Compare(ctx Context, ad *Datum, comparer collate.Collator) (int, error) { if d.k == KindMysqlJSON && ad.k != KindMysqlJSON { - cmp, err := ad.Compare(sc, d, comparer) + cmp, err := ad.Compare(ctx, d, comparer) return cmp * -1, errors.Trace(err) } switch ad.k { @@ -658,29 +653,29 @@ func (d *Datum) Compare(sc *stmtctx.StatementContext, ad *Datum, comparer collat } return -1, nil case KindInt64: - return d.compareInt64(typeCtx, ad.GetInt64()) + return d.compareInt64(ctx, ad.GetInt64()) case KindUint64: - return d.compareUint64(typeCtx, ad.GetUint64()) + return d.compareUint64(ctx, ad.GetUint64()) case KindFloat32, KindFloat64: - return d.compareFloat64(typeCtx, ad.GetFloat64()) + return d.compareFloat64(ctx, ad.GetFloat64()) case KindString: - return d.compareString(sc, ad.GetString(), comparer) + return d.compareString(ctx, ad.GetString(), comparer) case KindBytes: - return d.compareString(sc, ad.GetString(), comparer) + return d.compareString(ctx, ad.GetString(), comparer) case KindMysqlDecimal: - return d.compareMysqlDecimal(sc, ad.GetMysqlDecimal()) + return d.compareMysqlDecimal(ctx, ad.GetMysqlDecimal()) case KindMysqlDuration: - return d.compareMysqlDuration(sc, ad.GetMysqlDuration()) + return d.compareMysqlDuration(ctx, ad.GetMysqlDuration()) case KindMysqlEnum: - return d.compareMysqlEnum(typeCtx, ad.GetMysqlEnum(), comparer) + return d.compareMysqlEnum(ctx, ad.GetMysqlEnum(), comparer) case KindBinaryLiteral, KindMysqlBit: - return d.compareBinaryLiteral(typeCtx, ad.GetBinaryLiteral4Cmp(), comparer) + return d.compareBinaryLiteral(ctx, ad.GetBinaryLiteral4Cmp(), comparer) case KindMysqlSet: - return d.compareMysqlSet(typeCtx, ad.GetMysqlSet(), comparer) + return d.compareMysqlSet(ctx, ad.GetMysqlSet(), comparer) case KindMysqlJSON: - return d.compareMysqlJSON(sc, ad.GetMysqlJSON()) + return d.compareMysqlJSON(ad.GetMysqlJSON()) case KindMysqlTime: - return d.compareMysqlTime(sc, ad.GetMysqlTime()) + return d.compareMysqlTime(ctx, ad.GetMysqlTime()) default: return 0, nil } @@ -757,9 +752,7 @@ func (d *Datum) compareFloat64(ctx Context, f float64) (int, error) { } } -func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer collate.Collator) (int, error) { - typeCtx := sc.TypeCtxOrDefault() - +func (d *Datum) compareString(ctx Context, s string, comparer collate.Collator) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil @@ -769,13 +762,13 @@ func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer c return comparer.Compare(d.GetString(), s), nil case KindMysqlDecimal: dec := new(MyDecimal) - err := typeCtx.HandleTruncate(dec.FromString(hack.Slice(s))) + err := ctx.HandleTruncate(dec.FromString(hack.Slice(s))) return d.GetMysqlDecimal().Compare(dec), errors.Trace(err) case KindMysqlTime: - dt, err := ParseDatetime(sc, s) + dt, err := ParseDatetime(ctx, s) return d.GetMysqlTime().Compare(dt), errors.Trace(err) case KindMysqlDuration: - dur, _, err := ParseDuration(sc, s, MaxFsp) + dur, _, err := ParseDuration(ctx, s, MaxFsp) return d.GetMysqlDuration().Compare(dur), errors.Trace(err) case KindMysqlSet: return comparer.Compare(d.GetMysqlSet().String(), s), nil @@ -784,17 +777,15 @@ func (d *Datum) compareString(sc *stmtctx.StatementContext, s string, comparer c case KindBinaryLiteral, KindMysqlBit: return 
comparer.Compare(d.GetBinaryLiteral4Cmp().ToString(), s), nil default: - fVal, err := StrToFloat(sc.TypeCtxOrDefault(), s, false) + fVal, err := StrToFloat(ctx, s, false) if err != nil { return 0, errors.Trace(err) } - return d.compareFloat64(sc.TypeCtxOrDefault(), fVal) + return d.compareFloat64(ctx, fVal) } } -func (d *Datum) compareMysqlDecimal(sc *stmtctx.StatementContext, dec *MyDecimal) (int, error) { - typeCtx := sc.TypeCtxOrDefault() - +func (d *Datum) compareMysqlDecimal(ctx Context, dec *MyDecimal) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil @@ -804,10 +795,10 @@ func (d *Datum) compareMysqlDecimal(sc *stmtctx.StatementContext, dec *MyDecimal return d.GetMysqlDecimal().Compare(dec), nil case KindString, KindBytes: dDec := new(MyDecimal) - err := typeCtx.HandleTruncate(dDec.FromString(d.GetBytes())) + err := ctx.HandleTruncate(dDec.FromString(d.GetBytes())) return dDec.Compare(dec), errors.Trace(err) default: - dVal, err := d.ConvertTo(sc, NewFieldType(mysql.TypeNewDecimal)) + dVal, err := d.ConvertTo(ctx, NewFieldType(mysql.TypeNewDecimal)) if err != nil { return 0, errors.Trace(err) } @@ -815,7 +806,7 @@ func (d *Datum) compareMysqlDecimal(sc *stmtctx.StatementContext, dec *MyDecimal } } -func (d *Datum) compareMysqlDuration(sc *stmtctx.StatementContext, dur Duration) (int, error) { +func (d *Datum) compareMysqlDuration(ctx Context, dur Duration) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil @@ -824,10 +815,10 @@ func (d *Datum) compareMysqlDuration(sc *stmtctx.StatementContext, dur Duration) case KindMysqlDuration: return d.GetMysqlDuration().Compare(dur), nil case KindString, KindBytes: - dDur, _, err := ParseDuration(sc, d.GetString(), MaxFsp) + dDur, _, err := ParseDuration(ctx, d.GetString(), MaxFsp) return dDur.Compare(dur), errors.Trace(err) default: - return d.compareFloat64(sc.TypeCtxOrDefault(), dur.Seconds()) + return d.compareFloat64(ctx, dur.Seconds()) } } @@ -877,7 +868,7 @@ func (d *Datum) compareMysqlSet(ctx Context, set Set, comparer collate.Collator) } } -func (d *Datum) compareMysqlJSON(_ *stmtctx.StatementContext, target BinaryJSON) (int, error) { +func (d *Datum) compareMysqlJSON(target BinaryJSON) (int, error) { // json is not equal with NULL if d.k == KindNull { return 1, nil @@ -890,14 +881,14 @@ func (d *Datum) compareMysqlJSON(_ *stmtctx.StatementContext, target BinaryJSON) return CompareBinaryJSON(origin, target), nil } -func (d *Datum) compareMysqlTime(sc *stmtctx.StatementContext, time Time) (int, error) { +func (d *Datum) compareMysqlTime(ctx Context, time Time) (int, error) { switch d.k { case KindNull, KindMinNotNull: return -1, nil case KindMaxValue: return 1, nil case KindString, KindBytes: - dt, err := ParseDatetime(sc, d.GetString()) + dt, err := ParseDatetime(ctx, d.GetString()) return dt.Compare(time), errors.Trace(err) case KindMysqlTime: return d.GetMysqlTime().Compare(time), nil @@ -906,13 +897,13 @@ func (d *Datum) compareMysqlTime(sc *stmtctx.StatementContext, time Time) (int, if err != nil { return 0, errors.Trace(err) } - return d.compareFloat64(sc.TypeCtxOrDefault(), fVal) + return d.compareFloat64(ctx, fVal) } } // ConvertTo converts a datum to the target field type. 
// changes to this method need to be synced with type2Kind in rowcodec/types.go -func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) ConvertTo(ctx Context, target *FieldType) (Datum, error) { if d.k == KindNull { return Datum{}, nil } @@ -920,32 +911,32 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu case mysql.TypeTiny, mysql.TypeShort, mysql.TypeInt24, mysql.TypeLong, mysql.TypeLonglong: unsigned := mysql.HasUnsignedFlag(target.GetFlag()) if unsigned { - return d.convertToUint(sc, target) + return d.convertToUint(ctx, target) } - return d.convertToInt(sc, target) + return d.convertToInt(ctx, target) case mysql.TypeFloat, mysql.TypeDouble: - return d.convertToFloat(sc, target) + return d.convertToFloat(ctx, target) case mysql.TypeBlob, mysql.TypeTinyBlob, mysql.TypeMediumBlob, mysql.TypeLongBlob, mysql.TypeString, mysql.TypeVarchar, mysql.TypeVarString: - return d.convertToString(sc, target) + return d.convertToString(ctx, target) case mysql.TypeTimestamp: - return d.convertToMysqlTimestamp(sc, target) + return d.convertToMysqlTimestamp(ctx, target) case mysql.TypeDatetime, mysql.TypeDate: - return d.convertToMysqlTime(sc, target) + return d.convertToMysqlTime(ctx, target) case mysql.TypeDuration: - return d.convertToMysqlDuration(sc, target) + return d.convertToMysqlDuration(ctx, target) case mysql.TypeNewDecimal: - return d.convertToMysqlDecimal(sc, target) + return d.convertToMysqlDecimal(ctx, target) case mysql.TypeYear: - return d.ConvertToMysqlYear(sc, target) + return d.ConvertToMysqlYear(ctx, target) case mysql.TypeEnum: - return d.convertToMysqlEnum(sc, target) + return d.convertToMysqlEnum(ctx, target) case mysql.TypeBit: - return d.convertToMysqlBit(sc, target) + return d.convertToMysqlBit(ctx, target) case mysql.TypeSet: - return d.convertToMysqlSet(sc, target) + return d.convertToMysqlSet(ctx, target) case mysql.TypeJSON: - return d.convertToMysqlJSON(sc, target) + return d.convertToMysqlJSON(target) case mysql.TypeNull: return Datum{}, nil default: @@ -953,7 +944,7 @@ func (d *Datum) ConvertTo(sc *stmtctx.StatementContext, target *FieldType) (Datu } } -func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToFloat(ctx Context, target *FieldType) (Datum, error) { var ( f float64 ret Datum ) @@ -969,7 +960,7 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) case KindFloat32, KindFloat64: f = d.GetFloat64() case KindString, KindBytes: - f, err = StrToFloat(sc.TypeCtxOrDefault(), d.GetString(), false) + f, err = StrToFloat(ctx, d.GetString(), false) case KindMysqlTime: f, err = d.GetMysqlTime().ToNumber().ToFloat64() case KindMysqlDuration: @@ -981,14 +972,14 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target *FieldType) case KindMysqlEnum: f = d.GetMysqlEnum().ToNumber() case KindBinaryLiteral, KindMysqlBit: - val, err1 := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err1 := d.GetBinaryLiteral().ToInt(ctx) f, err = float64(val), err1 case KindMysqlJSON: - f, err = ConvertJSONToFloat(sc.TypeCtxOrDefault(), d.GetMysqlJSON()) + f, err = ConvertJSONToFloat(ctx, d.GetMysqlJSON()) default: return invalidConv(d, target.GetType()) } - f, err1 := ProduceFloatWithSpecifiedTp(f, target, sc) + f, err1 := ProduceFloatWithSpecifiedTp(f, target) if err == nil && err1 != nil { err = err1 } @@ -1001,7 +992,7 @@ func (d *Datum) convertToFloat(sc *stmtctx.StatementContext, target
*FieldType) } // ProduceFloatWithSpecifiedTp produces a new float64 according to `flen` and `decimal`. -func ProduceFloatWithSpecifiedTp(f float64, target *FieldType, sc *stmtctx.StatementContext) (_ float64, err error) { +func ProduceFloatWithSpecifiedTp(f float64, target *FieldType) (_ float64, err error) { if math.IsNaN(f) { return 0, overflow(f, target.GetType()) } @@ -1012,29 +1003,32 @@ func ProduceFloatWithSpecifiedTp(f float64, target *FieldType, sc *stmtctx.State // If no D is set, we will handle it like origin float whether M is set or not. if target.GetFlen() != UnspecifiedLength && target.GetDecimal() != UnspecifiedLength { f, err = TruncateFloat(f, target.GetFlen(), target.GetDecimal()) - if err = sc.HandleOverflow(err, err); err != nil { - return f, errors.Trace(err) - } } if mysql.HasUnsignedFlag(target.GetFlag()) && f < 0 { return 0, overflow(f, target.GetType()) } + + if err != nil { + // We must return the error got from TruncateFloat after checking whether the target is unsigned to make sure + // the returned float is not negative when the target type is unsigned. + return f, errors.Trace(err) + } + if target.GetType() == mysql.TypeFloat && (f > math.MaxFloat32 || f < -math.MaxFloat32) { if f > 0 { return math.MaxFloat32, overflow(f, target.GetType()) } return -math.MaxFloat32, overflow(f, target.GetType()) } - return f, nil + return f, errors.Trace(err) } -func (d *Datum) convertToString(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToString(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum s string err error ) - ctx := sc.TypeCtx() switch d.k { case KindInt64: s = strconv.FormatInt(d.GetInt64(), 10) @@ -1179,12 +1173,12 @@ func ProduceStrWithSpecifiedTp(s string, tp *FieldType, ctx Context, padZero boo return s, errors.Trace(ctx.HandleTruncate(err)) } -func (d *Datum) convertToInt(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { - i64, err := d.toSignedInteger(sc, target.GetType()) +func (d *Datum) convertToInt(ctx Context, target *FieldType) (Datum, error) { + i64, err := d.toSignedInteger(ctx, target.GetType()) return NewIntDatum(i64), errors.Trace(err) } -func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToUint(ctx Context, target *FieldType) (Datum, error) { tp := target.GetType() upperBound := IntergerUnsignedUpperBound(tp) var ( @@ -1194,14 +1188,14 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( ) switch d.k { case KindInt64: - val, err = ConvertIntToUint(sc.TypeFlags(), d.GetInt64(), upperBound, tp) + val, err = ConvertIntToUint(ctx.Flags(), d.GetInt64(), upperBound, tp) case KindUint64: val, err = ConvertUintToUint(d.GetUint64(), upperBound, tp) case KindFloat32, KindFloat64: - val, err = ConvertFloatToUint(sc.TypeFlags(), d.GetFloat64(), upperBound, tp) + val, err = ConvertFloatToUint(ctx.Flags(), d.GetFloat64(), upperBound, tp) case KindString, KindBytes: var err1 error - val, err1 = StrToUint(sc.TypeCtxOrDefault(), d.GetString(), false) + val, err1 = StrToUint(ctx, d.GetString(), false) val, err = ConvertUintToUint(val, upperBound, tp) if err == nil { err = err1 @@ -1213,7 +1207,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( if err == nil { err = err1 } - val, err1 = ConvertIntToUint(sc.TypeFlags(), ival, upperBound, tp) + val, err1 = ConvertIntToUint(ctx.Flags(), ival, upperBound, tp) if err == nil { err = err1 } @@ -1221,24 +1215,24 @@ 
func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( dec := d.GetMysqlDuration().ToNumber() err = dec.Round(dec, 0, ModeHalfUp) var err1 error - val, err1 = ConvertDecimalToUint(sc, dec, upperBound, tp) + val, err1 = ConvertDecimalToUint(dec, upperBound, tp) if err == nil { err = err1 } case KindMysqlDecimal: - val, err = ConvertDecimalToUint(sc, d.GetMysqlDecimal(), upperBound, tp) + val, err = ConvertDecimalToUint(d.GetMysqlDecimal(), upperBound, tp) case KindMysqlEnum: - val, err = ConvertFloatToUint(sc.TypeFlags(), d.GetMysqlEnum().ToNumber(), upperBound, tp) + val, err = ConvertFloatToUint(ctx.Flags(), d.GetMysqlEnum().ToNumber(), upperBound, tp) case KindMysqlSet: - val, err = ConvertFloatToUint(sc.TypeFlags(), d.GetMysqlSet().ToNumber(), upperBound, tp) + val, err = ConvertFloatToUint(ctx.Flags(), d.GetMysqlSet().ToNumber(), upperBound, tp) case KindBinaryLiteral, KindMysqlBit: - val, err = d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err = d.GetBinaryLiteral().ToInt(ctx) if err == nil { val, err = ConvertUintToUint(val, upperBound, tp) } case KindMysqlJSON: var i64 int64 - i64, err = ConvertJSONToInt(sc, d.GetMysqlJSON(), true, tp) + i64, err = ConvertJSONToInt(ctx, d.GetMysqlJSON(), true, tp) val = uint64(i64) default: return invalidConv(d, target.GetType()) @@ -1250,7 +1244,7 @@ func (d *Datum) convertToUint(sc *stmtctx.StatementContext, target *FieldType) ( return ret, nil } -func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlTimestamp(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum t Time @@ -1262,26 +1256,26 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi } switch d.k { case KindMysqlTime: - t, err = d.GetMysqlTime().Convert(sc, target.GetType()) + t, err = d.GetMysqlTime().Convert(ctx, target.GetType()) if err != nil { // t might be an invalid Timestamp, but should still be comparable, since same representation (KindMysqlTime) ret.SetMysqlTime(t) return ret, errors.Trace(ErrWrongValue.GenWithStackByArgs(TimestampStr, t.String())) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDuration: - t, err = d.GetMysqlDuration().ConvertToTime(sc, mysql.TypeTimestamp) + t, err = d.GetMysqlDuration().ConvertToTime(ctx, mysql.TypeTimestamp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindString, KindBytes: - t, err = ParseTime(sc, d.GetString(), mysql.TypeTimestamp, fsp, nil) + t, err = ParseTime(ctx, d.GetString(), mysql.TypeTimestamp, fsp, nil) case KindInt64: - t, err = ParseTimeFromNum(sc, d.GetInt64(), mysql.TypeTimestamp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), mysql.TypeTimestamp, fsp) case KindMysqlDecimal: - t, err = ParseTimeFromFloatString(sc, d.GetMysqlDecimal().String(), mysql.TypeTimestamp, fsp) + t, err = ParseTimeFromFloatString(ctx, d.GetMysqlDecimal().String(), mysql.TypeTimestamp, fsp) case KindMysqlJSON: j := d.GetMysqlJSON() var s string @@ -1290,7 +1284,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi ret.SetMysqlTime(t) return ret, err } - t, err = ParseTime(sc, s, mysql.TypeTimestamp, fsp, nil) + t, err = ParseTime(ctx, s, mysql.TypeTimestamp, fsp, nil) default: return invalidConv(d, mysql.TypeTimestamp) } @@ -1302,7 +1296,7 @@ func (d *Datum) convertToMysqlTimestamp(sc *stmtctx.StatementContext, target *Fi return 
ret, nil } -func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlTime(ctx Context, target *FieldType) (Datum, error) { tp := target.GetType() fsp := DefaultFsp if target.GetDecimal() != UnspecifiedLength { @@ -1315,32 +1309,32 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy ) switch d.k { case KindMysqlTime: - t, err = d.GetMysqlTime().Convert(sc, tp) + t, err = d.GetMysqlTime().Convert(ctx, tp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDuration: - t, err = d.GetMysqlDuration().ConvertToTime(sc, tp) + t, err = d.GetMysqlDuration().ConvertToTime(ctx, tp) if err != nil { ret.SetMysqlTime(t) return ret, errors.Trace(err) } - t, err = t.RoundFrac(sc, fsp) + t, err = t.RoundFrac(ctx, fsp) case KindMysqlDecimal: - t, err = ParseTimeFromFloatString(sc, d.GetMysqlDecimal().String(), tp, fsp) + t, err = ParseTimeFromFloatString(ctx, d.GetMysqlDecimal().String(), tp, fsp) case KindString, KindBytes: - t, err = ParseTime(sc, d.GetString(), tp, fsp, nil) + t, err = ParseTime(ctx, d.GetString(), tp, fsp, nil) case KindInt64: - t, err = ParseTimeFromNum(sc, d.GetInt64(), tp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), tp, fsp) case KindUint64: intOverflow64 := d.GetInt64() < 0 if intOverflow64 { uNum := strconv.FormatUint(d.GetUint64(), 10) t, err = ZeroDate, ErrWrongValue.GenWithStackByArgs(TimeStr, uNum) } else { - t, err = ParseTimeFromNum(sc, d.GetInt64(), tp, fsp) + t, err = ParseTimeFromNum(ctx, d.GetInt64(), tp, fsp) } case KindMysqlJSON: j := d.GetMysqlJSON() @@ -1350,7 +1344,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy ret.SetMysqlTime(t) return ret, err } - t, err = ParseTime(sc, s, tp, fsp, nil) + t, err = ParseTime(ctx, s, tp, fsp, nil) default: return invalidConv(d, tp) } @@ -1365,7 +1359,7 @@ func (d *Datum) convertToMysqlTime(sc *stmtctx.StatementContext, target *FieldTy return ret, nil } -func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlDuration(typeCtx Context, target *FieldType) (Datum, error) { tp := target.GetType() fsp := DefaultFsp if target.GetDecimal() != UnspecifiedLength { @@ -1379,13 +1373,13 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie ret.SetMysqlDuration(dur) return ret, errors.Trace(err) } - dur, err = dur.RoundFrac(fsp, sc.TimeZone()) + dur, err = dur.RoundFrac(fsp, typeCtx.Location()) ret.SetMysqlDuration(dur) if err != nil { return ret, errors.Trace(err) } case KindMysqlDuration: - dur, err := d.GetMysqlDuration().RoundFrac(fsp, sc.TimeZone()) + dur, err := d.GetMysqlDuration().RoundFrac(fsp, typeCtx.Location()) ret.SetMysqlDuration(dur) if err != nil { return ret, errors.Trace(err) @@ -1396,7 +1390,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie if err != nil { return ret, errors.Trace(err) } - timeNum, err := d.ToInt64(sc) + timeNum, err := d.ToInt64(typeCtx) if err != nil { return ret, errors.Trace(err) } @@ -1409,13 +1403,13 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie if timeNum < -MaxDuration { return ret, ErrWrongValue.GenWithStackByArgs(TimeStr, timeStr) } - t, _, err := ParseDuration(sc, timeStr, fsp) + t, _, err := ParseDuration(typeCtx, timeStr, fsp) ret.SetMysqlDuration(t) if err != nil { return 
ret, errors.Trace(err) } case KindString, KindBytes: - t, _, err := ParseDuration(sc, d.GetString(), fsp) + t, _, err := ParseDuration(typeCtx, d.GetString(), fsp) ret.SetMysqlDuration(t) if err != nil { return ret, errors.Trace(err) @@ -1426,7 +1420,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie if err != nil { return ret, errors.Trace(err) } - t, _, err := ParseDuration(sc, s, fsp) + t, _, err := ParseDuration(typeCtx, s, fsp) ret.SetMysqlDuration(t) if err != nil { return ret, errors.Trace(err) @@ -1437,7 +1431,7 @@ func (d *Datum) convertToMysqlDuration(sc *stmtctx.StatementContext, target *Fie return ret, nil } -func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlDecimal(ctx Context, target *FieldType) (Datum, error) { var ret Datum ret.SetLength(target.GetFlen()) ret.SetFrac(target.GetDecimal()) @@ -1463,11 +1457,11 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel case KindMysqlSet: err = dec.FromFloat64(d.GetMysqlSet().ToNumber()) case KindBinaryLiteral, KindMysqlBit: - val, err1 := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err1 := d.GetBinaryLiteral().ToInt(ctx) err = err1 dec.FromUint(val) case KindMysqlJSON: - f, err1 := ConvertJSONToDecimal(sc.TypeCtxOrDefault(), d.GetMysqlJSON()) + f, err1 := ConvertJSONToDecimal(ctx, d.GetMysqlJSON()) if err1 != nil { return ret, errors.Trace(err1) } @@ -1475,7 +1469,7 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel default: return invalidConv(d, target.GetType()) } - dec1, err1 := ProduceDecWithSpecifiedTp(dec, target, sc) + dec1, err1 := ProduceDecWithSpecifiedTp(ctx, dec, target) // If there is a error, dec1 may be nil. if dec1 != nil { dec = dec1 @@ -1494,7 +1488,7 @@ func (d *Datum) convertToMysqlDecimal(sc *stmtctx.StatementContext, target *Fiel } // ProduceDecWithSpecifiedTp produces a new decimal according to `flen` and `decimal`. -func ProduceDecWithSpecifiedTp(dec *MyDecimal, tp *FieldType, sc *stmtctx.StatementContext) (_ *MyDecimal, err error) { +func ProduceDecWithSpecifiedTp(ctx Context, dec *MyDecimal, tp *FieldType) (_ *MyDecimal, err error) { flen, decimal := tp.GetFlen(), tp.GetDecimal() if flen != UnspecifiedLength && decimal != UnspecifiedLength { if flen < decimal { @@ -1522,14 +1516,10 @@ func ProduceDecWithSpecifiedTp(dec *MyDecimal, tp *FieldType, sc *stmtctx.Statem // select cast(111 as decimal(1)) causes a warning in MySQL. err = ErrOverflow.GenWithStackByArgs("DECIMAL", fmt.Sprintf("(%d, %d)", flen, decimal)) } else if old != nil && dec.Compare(old) != 0 { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("DECIMAL", old)) + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("DECIMAL", old)) } } - if ErrOverflow.Equal(err) { - // TODO: warnErr need to be ErrWarnDataOutOfRange - err = sc.HandleOverflow(err, err) - } unsigned := mysql.HasUnsignedFlag(tp.GetFlag()) if unsigned && dec.IsNegative() { dec = dec.FromUint(0) @@ -1538,7 +1528,7 @@ func ProduceDecWithSpecifiedTp(dec *MyDecimal, tp *FieldType, sc *stmtctx.Statem } // ConvertToMysqlYear converts a datum to MySQLYear. 
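// A small usage sketch (illustrative; ctx is any types.Context):
//
//	d := NewIntDatum(2015)
//	yd, err := d.ConvertToMysqlYear(ctx, NewFieldType(mysql.TypeYear)) // assuming no error, yd.GetInt64() == 2015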
-func (d *Datum) ConvertToMysqlYear(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) ConvertToMysqlYear(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum y int64 @@ -1549,7 +1539,7 @@ func (d *Datum) ConvertToMysqlYear(sc *stmtctx.StatementContext, target *FieldTy case KindString, KindBytes: s := d.GetString() trimS := strings.TrimSpace(s) - y, err = StrToInt(sc.TypeCtxOrDefault(), trimS, false) + y, err = StrToInt(ctx, trimS, false) if err != nil { ret.SetInt64(0) return ret, errors.Trace(err) @@ -1562,13 +1552,13 @@ func (d *Datum) ConvertToMysqlYear(sc *stmtctx.StatementContext, target *FieldTy case KindMysqlTime: y = int64(d.GetMysqlTime().Year()) case KindMysqlJSON: - y, err = ConvertJSONToInt64(sc, d.GetMysqlJSON(), false) + y, err = ConvertJSONToInt64(ctx, d.GetMysqlJSON(), false) if err != nil { ret.SetInt64(0) return ret, errors.Trace(err) } default: - ret, err = d.convertToInt(sc, NewFieldType(mysql.TypeLonglong)) + ret, err = d.convertToInt(ctx, NewFieldType(mysql.TypeLonglong)) if err != nil { _, err = invalidConv(d, target.GetType()) ret.SetInt64(0) @@ -1590,13 +1580,13 @@ func (d *Datum) convertStringToMysqlBit(ctx Context) (uint64, error) { return bitStr.ToInt(ctx) } -func (d *Datum) convertToMysqlBit(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlBit(ctx Context, target *FieldType) (Datum, error) { var ret Datum var uintValue uint64 var err error switch d.k { case KindBytes: - uintValue, err = BinaryLiteral(d.b).ToInt(sc.TypeCtxOrDefault()) + uintValue, err = BinaryLiteral(d.b).ToInt(ctx) case KindString: // For single bit value, we take string like "true", "1" as 1, and "false", "0" as 0, // this behavior is not documented in MySQL, but it behaves so, for more information, see issue #18681 @@ -1608,17 +1598,17 @@ func (d *Datum) convertToMysqlBit(sc *stmtctx.StatementContext, target *FieldTyp case "false", "0": uintValue = 0 default: - uintValue, err = d.convertStringToMysqlBit(sc.TypeCtxOrDefault()) + uintValue, err = d.convertStringToMysqlBit(ctx) } } else { - uintValue, err = d.convertStringToMysqlBit(sc.TypeCtxOrDefault()) + uintValue, err = d.convertStringToMysqlBit(ctx) } case KindInt64: // if input kind is int64 (signed), when trans to bit, we need to treat it as unsigned d.k = KindUint64 fallthrough default: - uintDatum, err1 := d.convertToUint(sc, target) + uintDatum, err1 := d.convertToUint(ctx, target) uintValue, err = uintDatum.GetUint64(), err1 } // Avoid byte size panic, never goto this branch. 
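The single-bit special case above is exercised directly by the updated tests: for a BIT(1) target, boolean-looking strings collapse to a single byte. A minimal sketch using names from this patch (illustrative only; convertToMysqlBit is unexported, so this only compiles inside package types):

	tp := NewFieldType(mysql.TypeBit)
	tp.SetFlen(1)
	d := NewStringDatum("true")
	bin, err := d.convertToMysqlBit(DefaultStmtNoWarningContext, tp)
	// assuming no error, bin.b == []byte{1}; "false" and "0" map to []byte{0}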
@@ -1634,7 +1624,7 @@ func (d *Datum) convertToMysqlBit(sc *stmtctx.StatementContext, target *FieldTyp return ret, errors.Trace(err) } -func (d *Datum) convertToMysqlEnum(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlEnum(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum e Enum @@ -1655,7 +1645,7 @@ func (d *Datum) convertToMysqlEnum(sc *stmtctx.StatementContext, target *FieldTy e, err = ParseEnum(target.GetElems(), d.GetMysqlSet().Name, target.GetCollate()) default: var uintDatum Datum - uintDatum, err = d.convertToUint(sc, target) + uintDatum, err = d.convertToUint(ctx, target) if err == nil { e, err = ParseEnumValue(target.GetElems(), uintDatum.GetUint64()) } else { @@ -1666,7 +1656,7 @@ func (d *Datum) convertToMysqlEnum(sc *stmtctx.StatementContext, target *FieldTy return ret, err } -func (d *Datum) convertToMysqlSet(sc *stmtctx.StatementContext, target *FieldType) (Datum, error) { +func (d *Datum) convertToMysqlSet(ctx Context, target *FieldType) (Datum, error) { var ( ret Datum s Set @@ -1681,7 +1671,7 @@ func (d *Datum) convertToMysqlSet(sc *stmtctx.StatementContext, target *FieldTyp s, err = ParseSet(target.GetElems(), d.GetMysqlSet().Name, target.GetCollate()) default: var uintDatum Datum - uintDatum, err = d.convertToUint(sc, target) + uintDatum, err = d.convertToUint(ctx, target) if err == nil { s, err = ParseSetValue(target.GetElems(), uintDatum.GetUint64()) } @@ -1693,7 +1683,7 @@ func (d *Datum) convertToMysqlSet(sc *stmtctx.StatementContext, target *FieldTyp return ret, err } -func (d *Datum) convertToMysqlJSON(_ *stmtctx.StatementContext, _ *FieldType) (ret Datum, err error) { +func (d *Datum) convertToMysqlJSON(_ *FieldType) (ret Datum, err error) { switch d.k { case KindString, KindBytes: var j BinaryJSON @@ -1843,15 +1833,15 @@ func (d *Datum) ToDecimal(ctx Context) (*MyDecimal, error) { } // ToInt64 converts to an int64.
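// Fractional time and duration values round half-up at the integer boundary here, as
// the worked examples inside toSignedInteger below note (e.g. 11:59:59.999999 -> 120000).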
-func (d *Datum) ToInt64(sc *stmtctx.StatementContext) (int64, error) { +func (d *Datum) ToInt64(ctx Context) (int64, error) { if d.Kind() == KindMysqlBit { - uintVal, err := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + uintVal, err := d.GetBinaryLiteral().ToInt(ctx) return int64(uintVal), err } - return d.toSignedInteger(sc, mysql.TypeLonglong) + return d.toSignedInteger(ctx, mysql.TypeLonglong) } -func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, error) { +func (d *Datum) toSignedInteger(ctx Context, tp byte) (int64, error) { lowerBound := IntergerSignedLowerBound(tp) upperBound := IntergerSignedUpperBound(tp) switch d.Kind() { @@ -1864,7 +1854,7 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e case KindFloat64: return ConvertFloatToInt(d.GetFloat64(), lowerBound, upperBound, tp) case KindString, KindBytes: - iVal, err := StrToInt(sc.TypeCtxOrDefault(), d.GetString(), false) + iVal, err := StrToInt(ctx, d.GetString(), false) iVal, err2 := ConvertIntToInt(iVal, lowerBound, upperBound, tp) if err == nil { err = err2 } @@ -1873,7 +1863,7 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e case KindMysqlTime: // 2011-11-10 11:11:11.999999 -> 20111110111112 // 2011-11-10 11:59:59.999999 -> 20111110120000 - t, err := d.GetMysqlTime().RoundFrac(sc, DefaultFsp) + t, err := d.GetMysqlTime().RoundFrac(ctx, DefaultFsp) if err != nil { return 0, errors.Trace(err) } @@ -1886,7 +1876,7 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e case KindMysqlDuration: // 11:11:11.999999 -> 111112 // 11:59:59.999999 -> 120000 - dur, err := d.GetMysqlDuration().RoundFrac(DefaultFsp, sc.TimeZone()) + dur, err := d.GetMysqlDuration().RoundFrac(DefaultFsp, ctx.Location()) if err != nil { return 0, errors.Trace(err) } @@ -1915,9 +1905,9 @@ func (d *Datum) toSignedInteger(sc *stmtctx.StatementContext, tp byte) (int64, e fval := d.GetMysqlSet().ToNumber() return ConvertFloatToInt(fval, lowerBound, upperBound, tp) case KindMysqlJSON: - return ConvertJSONToInt(sc, d.GetMysqlJSON(), false, tp) + return ConvertJSONToInt(ctx, d.GetMysqlJSON(), false, tp) case KindBinaryLiteral, KindMysqlBit: - val, err := d.GetBinaryLiteral().ToInt(sc.TypeCtxOrDefault()) + val, err := d.GetBinaryLiteral().ToInt(ctx) if err != nil { return 0, errors.Trace(err) } @@ -2261,15 +2251,15 @@ func MaxValueDatum() Datum { } // SortDatums sorts a slice of datums. -func SortDatums(sc *stmtctx.StatementContext, datums []Datum) error { - sorter := datumsSorter{datums: datums, sc: sc} +func SortDatums(ctx Context, datums []Datum) error { + sorter := datumsSorter{datums: datums, ctx: ctx} sort.Sort(&sorter) return sorter.err } type datumsSorter struct { datums []Datum - sc *stmtctx.StatementContext + ctx Context err error } @@ -2278,7 +2268,7 @@ func (ds *datumsSorter) Len() int { } func (ds *datumsSorter) Less(i, j int) bool { - cmp, err := ds.datums[i].Compare(ds.sc, &ds.datums[j], collate.GetCollator(ds.datums[i].Collation())) + cmp, err := ds.datums[i].Compare(ds.ctx, &ds.datums[j], collate.GetCollator(ds.datums[i].Collation())) if err != nil { ds.err = errors.Trace(err) return true @@ -2459,11 +2449,11 @@ func getDatumBound(retType *FieldType, rType RoundingType) Datum { // case, we should judge whether the rounding type is ceiling. If it is, we should add one to // 1.0 and get the reverse result 2.0.
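// A usage sketch (illustrative; ctx is any types.Context, and Ceiling is assumed to be
// one of this package's RoundingType constants):
//
//	reverseRes, err := ChangeReverseResultByUpperLowerBound(ctx, retType, res, Ceiling)
//	// on overflow the error is swallowed and the bounded conversion of res is returned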
func ChangeReverseResultByUpperLowerBound( - sc *stmtctx.StatementContext, + ctx Context, retType *FieldType, res Datum, rType RoundingType) (Datum, error) { - d, err := res.ConvertTo(sc, retType) + d, err := res.ConvertTo(ctx, retType) if terror.ErrorEqual(err, ErrOverflow) { return d, nil } @@ -2487,7 +2477,7 @@ func ChangeReverseResultByUpperLowerBound( resRetType.SetDecimalUnderLimit(int(res.GetMysqlDecimal().GetDigitsInt())) } bound := getDatumBound(&resRetType, rType) - cmp, err := d.Compare(sc, &bound, collate.GetCollator(resRetType.GetCollate())) + cmp, err := d.Compare(ctx, &bound, collate.GetCollator(resRetType.GetCollate())) if err != nil { return d, err } diff --git a/pkg/types/datum_eval.go b/pkg/types/datum_eval.go index cb3c0a87148ed..204a873e0e0ad 100644 --- a/pkg/types/datum_eval.go +++ b/pkg/types/datum_eval.go @@ -17,7 +17,6 @@ package types import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/opcode" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // ComputePlus computes the result of a+b. @@ -56,7 +55,7 @@ func ComputePlus(a, b Datum) (d Datum, err error) { r := new(MyDecimal) err = DecimalAdd(a.GetMysqlDecimal(), b.GetMysqlDecimal(), r) d.SetMysqlDecimal(r) - d.SetFrac(mathutil.Max(a.Frac(), b.Frac())) + d.SetFrac(max(a.Frac(), b.Frac())) return d, err } } diff --git a/pkg/types/datum_test.go b/pkg/types/datum_test.go index a5fc7a57d7758..b1f4dc053e0eb 100644 --- a/pkg/types/datum_test.go +++ b/pkg/types/datum_test.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/charset" "github.com/pingcap/tidb/pkg/parser/mysql" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/collate" "github.com/pingcap/tidb/pkg/util/hack" "github.com/stretchr/testify/assert" @@ -56,7 +55,7 @@ func TestDatum(t *testing.T) { func testDatumToBool(t *testing.T, in interface{}, res int) { datum := NewDatum(in) res64 := int64(res) - ctx := DefaultNoWarningContext.WithFlags(StrictFlags.WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) b, err := datum.ToBool(ctx) require.NoError(t, err) require.Equal(t, res64, b) @@ -93,11 +92,11 @@ func TestToBool(t *testing.T) { testDatumToBool(t, CreateBinaryJSON(true), 1) testDatumToBool(t, CreateBinaryJSON(false), 1) testDatumToBool(t, CreateBinaryJSON(""), 1) - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 6, nil) require.NoError(t, err) testDatumToBool(t, t1, 1) - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testDatumToBool(t, td, 1) @@ -107,7 +106,7 @@ func TestToBool(t *testing.T) { require.NoError(t, err) testDatumToBool(t, v, 1) d := NewDatum(&invalidMockType{}) - ctx := DefaultNoWarningContext.WithFlags(StrictFlags.WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) _, err = d.ToBool(ctx) require.Error(t, err) } @@ -115,10 +114,9 @@ func TestToBool(t *testing.T) { func testDatumToInt64(t *testing.T, val interface{}, expect int64) { d := NewDatum(val) - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) - b, err := d.ToInt64(sc) + 
b, err := d.ToInt64(ctx) require.NoError(t, err) require.Equal(t, expect, b) } @@ -135,11 +133,11 @@ func TestToInt64(t *testing.T) { testDatumToInt64(t, Set{Name: "a", Value: 1}, int64(1)) testDatumToInt64(t, CreateBinaryJSON(int64(3)), int64(3)) - t1, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 0, nil) + t1, err := ParseTime(DefaultStmtNoWarningContext, "2011-11-10 11:11:11.999999", mysql.TypeTimestamp, 0, nil) require.NoError(t, err) testDatumToInt64(t, t1, int64(20111110111112)) - td, _, err := ParseDuration(nil, "11:11:11.999999", 6) + td, _, err := ParseDuration(DefaultStmtNoWarningContext, "11:11:11.999999", 6) require.NoError(t, err) testDatumToInt64(t, td, int64(111112)) @@ -152,12 +150,11 @@ func TestToInt64(t *testing.T) { func testDatumToUInt32(t *testing.T, val interface{}, expect uint32, hasError bool) { d := NewDatum(val) - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) ft := NewFieldType(mysql.TypeLong) ft.AddFlag(mysql.UnsignedFlag) - converted, err := d.ConvertTo(sc, ft) + converted, err := d.ConvertTo(ctx, ft) if hasError { require.Error(t, err) @@ -204,10 +201,9 @@ func TestConvertToFloat(t *testing.T) { {NewDatum("281.37"), mysql.TypeFloat, "", 281.37, 281.37}, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) for _, testCase := range testCases { - converted, err := testCase.d.ConvertTo(sc, NewFieldType(testCase.tp)) + converted, err := testCase.d.ConvertTo(ctx, NewFieldType(testCase.tp)) if testCase.errMsg == "" { require.NoError(t, err) } else { @@ -225,7 +221,7 @@ func TestConvertToFloat(t *testing.T) { } func mustParseTime(s string, tp byte, fsp int) Time { - t, err := ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), s, tp, fsp, nil) + t, err := ParseTime(DefaultStmtNoWarningContext, s, tp, fsp, nil) if err != nil { panic("ParseTime fail") } @@ -241,7 +237,6 @@ func mustParseTimeIntoDatum(s string, tp byte, fsp int) (d Datum) { func TestToJSON(t *testing.T) { ft := NewFieldType(mysql.TypeJSON) - sc := stmtctx.NewStmtCtx() tests := []struct { datum Datum expected interface{} @@ -260,14 +255,14 @@ func TestToJSON(t *testing.T) { {NewStringDatum("hello, 世界"), "", false}, } for _, tt := range tests { - obtain, err := tt.datum.ConvertTo(sc, ft) + obtain, err := tt.datum.ConvertTo(DefaultStmtNoWarningContext, ft) if tt.success { require.NoError(t, err) expected := NewJSONDatum(CreateBinaryJSON(tt.expected)) var cmp int - cmp, err = obtain.Compare(sc, &expected, collate.GetBinaryCollator()) + cmp, err = obtain.Compare(DefaultStmtNoWarningContext, &expected, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) } else { @@ -309,8 +304,6 @@ func TestToBytes(t *testing.T) { {NewStringDatum("abc"), []byte("abc")}, {Datum{}, []byte{}}, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) for _, tt := range tests { bin, err := tt.a.ToBytes() require.NoError(t, err) @@ -319,7 +312,6 @@ func TestToBytes(t *testing.T) { } func TestComputePlusAndMinus(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) tests := []struct { a Datum b Datum @@ -340,7 +332,7 @@ func TestComputePlusAndMinus(t *testing.T) { for ith, tt := range tests { got, err := ComputePlus(tt.a, tt.b) 
require.Equal(t, tt.hasErr, err != nil) - v, err := got.Compare(sc, &tt.plus, collate.GetBinaryCollator()) + v, err := got.Compare(DefaultStmtNoWarningContext, &tt.plus, collate.GetBinaryCollator()) require.NoError(t, err) require.Equalf(t, 0, v, "%dth got:%#v, %#v, expect:%#v, %#v", ith, got, got.x, tt.plus, tt.plus.x) } @@ -358,11 +350,11 @@ func TestCloneDatum(t *testing.T) { raw, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) + for _, tt := range tests { tt1 := *tt.Clone() - res, err := tt.Compare(sc, &tt1, collate.GetBinaryCollator()) + res, err := tt.Compare(ctx, &tt1, collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, res) if tt.b != nil { @@ -412,9 +404,7 @@ func TestEstimatedMemUsage(t *testing.T) { } func TestChangeReverseResultByUpperLowerBound(t *testing.T) { - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) - sc.OverflowAsWarning = true + ctx := DefaultStmtNoWarningContext.WithFlags(DefaultStmtFlags.WithIgnoreTruncateErr(true)) // TODO: add more reserve convert tests for each pair of convert type. testData := []struct { a Datum @@ -499,10 +489,10 @@ func TestChangeReverseResultByUpperLowerBound(t *testing.T) { }, } for ith, test := range testData { - reverseRes, err := ChangeReverseResultByUpperLowerBound(sc, test.retType, test.a, test.roundType) + reverseRes, err := ChangeReverseResultByUpperLowerBound(ctx, test.retType, test.a, test.roundType) require.NoError(t, err) var cmp int - cmp, err = reverseRes.Compare(sc, &test.res, collate.GetBinaryCollator()) + cmp, err = reverseRes.Compare(ctx, &test.res, collate.GetBinaryCollator()) require.NoError(t, err) require.Equalf(t, 0, cmp, "%dth got:%#v, expect:%#v", ith, reverseRes, test.res) } @@ -537,12 +527,10 @@ func TestStringToMysqlBit(t *testing.T) { {NewStringDatum("b'1'"), []byte{1}}, {NewStringDatum("b'0'"), []byte{0}}, } - sc := stmtctx.NewStmtCtx() - sc.SetTypeFlags(sc.TypeFlags().WithIgnoreTruncateErr(true)) tp := NewFieldType(mysql.TypeBit) tp.SetFlen(1) for _, tt := range tests { - bin, err := tt.a.convertToMysqlBit(nil, tp) + bin, err := tt.a.convertToMysqlBit(DefaultStmtNoWarningContext, tp) require.NoError(t, err) require.Equal(t, tt.out, bin.b) } @@ -603,11 +591,10 @@ func TestMarshalDatum(t *testing.T) { func BenchmarkCompareDatum(b *testing.B) { vals, vals1 := prepareCompareDatums() - sc := stmtctx.NewStmtCtx() b.ResetTimer() for i := 0; i < b.N; i++ { for j, v := range vals { - _, err := v.Compare(sc, &vals1[j], collate.GetBinaryCollator()) + _, err := v.Compare(DefaultStmtNoWarningContext, &vals1[j], collate.GetBinaryCollator()) if err != nil { b.Fatal(err) } @@ -650,11 +637,12 @@ func TestProduceDecWithSpecifiedTp(t *testing.T) { {"99.9999", 6, 3, "100.000", false, true}, {"-99.9999", 6, 3, "-100.000", false, true}, } - sc := stmtctx.NewStmtCtx() + warnings := &warnStore{} + ctx := NewContext(DefaultStmtFlags, time.UTC, warnings.AppendWarning) for _, tt := range tests { tp := NewFieldTypeBuilder().SetType(mysql.TypeNewDecimal).SetFlen(tt.flen).SetDecimal(tt.frac).BuildP() dec := NewDecFromStringForTest(tt.dec) - newDec, err := ProduceDecWithSpecifiedTp(dec, tp, sc) + newDec, err := ProduceDecWithSpecifiedTp(ctx, dec, tp) if tt.isOverflow { if !ErrOverflow.Equal(err) { assert.FailNow(t, "Error is not overflow", "err: %v before: %v after: %v", err, tt.dec, dec) @@ -663,9 +651,10 @@ func 
TestProduceDecWithSpecifiedTp(t *testing.T) { require.NoError(t, err, tt) } require.Equal(t, tt.newDec, newDec.String()) - warn := sc.TruncateWarnings(0) + warn := warnings.GetWarnings() + warnings.Reset() if tt.isTruncated { - if len(warn) != 1 || !ErrTruncatedWrongVal.Equal(warn[0].Err) { + if len(warn) != 1 || !ErrTruncatedWrongVal.Equal(warn[0]) { assert.FailNow(t, "Warn is not truncated", "warn: %v before: %v after: %v", warn, tt.dec, dec) } } else { @@ -696,9 +685,8 @@ func TestNULLNotEqualWithOthers(t *testing.T) { MaxValueDatum(), } nullDatum := NewDatum(nil) - sc := stmtctx.NewStmtCtx() for _, d := range datums { - result, err := d.Compare(sc, &nullDatum, collate.GetBinaryCollator()) + result, err := d.Compare(DefaultStmtNoWarningContext, &nullDatum, collate.GetBinaryCollator()) require.NoError(t, err) require.NotEqual(t, 0, result) } diff --git a/pkg/types/errors.go b/pkg/types/errors.go index 8c7dffcf73b8b..c245816562ace 100644 --- a/pkg/types/errors.go +++ b/pkg/types/errors.go @@ -64,7 +64,7 @@ var ( ErrDuplicatedValueInType = dbterror.ClassTypes.NewStd(mysql.ErrDuplicatedValueInType) // ErrDatetimeFunctionOverflow is returned when the calculation in datetime function cause overflow. ErrDatetimeFunctionOverflow = dbterror.ClassTypes.NewStd(mysql.ErrDatetimeFunctionOverflow) - // ErrCastAsSignedOverflow is returned when positive out-of-range integer, and convert to it's negative complement. + // ErrCastAsSignedOverflow is returned when positive out-of-range integer, and convert to its negative complement. ErrCastAsSignedOverflow = dbterror.ClassTypes.NewStd(mysql.ErrCastAsSignedOverflow) // ErrCastNegIntAsUnsigned is returned when a negative integer be casted to an unsigned int. ErrCastNegIntAsUnsigned = dbterror.ClassTypes.NewStd(mysql.ErrCastNegIntAsUnsigned) diff --git a/pkg/types/format_test.go b/pkg/types/format_test.go index b48cec6212acf..a63d8f161ee7e 100644 --- a/pkg/types/format_test.go +++ b/pkg/types/format_test.go @@ -16,16 +16,15 @@ package types_test import ( "testing" + "time" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" ) func TestTimeFormatMethod(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tblDate := []struct { Input string Format string @@ -69,7 +68,7 @@ func TestTimeFormatMethod(t *testing.T) { }, } for i, tt := range tblDate { - tm, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, 6, nil) + tm, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, 6, nil) require.NoErrorf(t, err, "Parse time fail: %s", tt.Input) str, err := tm.DateFormat(tt.Format) @@ -79,8 +78,7 @@ func TestTimeFormatMethod(t *testing.T) { } func TestStrToDate(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tests := []struct { input string format string @@ -157,9 +155,9 @@ func TestStrToDate(t *testing.T) { {"30/Feb/2016 12:34:56.1234", "%d/%b/%Y %H:%i:%S.%f", types.FromDate(2016, 2, 30, 12, 34, 56, 123400)}, // Feb 30th } for i, tt := range tests { - sc.AllowInvalidDate = true + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreInvalidDateErr(true)) var time types.Time - require.Truef(t, time.StrToDate(sc, tt.input, tt.format), "no.%d failed input=%s 
format=%s", i, tt.input, tt.format) + require.Truef(t, time.StrToDate(typeCtx, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) require.Equalf(t, tt.expect, time.CoreTime(), "no.%d failed input=%s format=%s", i, tt.input, tt.format) } @@ -192,8 +190,8 @@ func TestStrToDate(t *testing.T) { {"11:13:56a", "%r"}, // EOF while parsing "AM"/"PM" } for i, tt := range errTests { - sc.AllowInvalidDate = false + typeCtx = typeCtx.WithFlags(typeCtx.Flags().WithIgnoreInvalidDateErr(false)) var time types.Time - require.Falsef(t, time.StrToDate(sc, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) + require.Falsef(t, time.StrToDate(typeCtx, tt.input, tt.format), "no.%d failed input=%s format=%s", i, tt.input, tt.format) } } diff --git a/pkg/types/mydecimal.go b/pkg/types/mydecimal.go index baca38fef4443..f2127bf4fbfe2 100644 --- a/pkg/types/mydecimal.go +++ b/pkg/types/mydecimal.go @@ -24,7 +24,6 @@ import ( "github.com/pingcap/log" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/util/mathutil" "go.uber.org/zap" ) @@ -354,7 +353,7 @@ func (d *MyDecimal) ToString() (str []byte) { for ; digitsFrac > 0; digitsFrac -= digitsPerWord { x := d.wordBuf[wordIdx] wordIdx++ - for i := mathutil.Min(digitsFrac, digitsPerWord); i > 0; i-- { + for i := min(digitsFrac, digitsPerWord); i > 0; i-- { y := x / digMask str[fracIdx] = byte(y) + '0' fracIdx++ @@ -381,7 +380,7 @@ func (d *MyDecimal) ToString() (str []byte) { for ; digitsInt > 0; digitsInt -= digitsPerWord { wordIdx-- x := d.wordBuf[wordIdx] - for i := mathutil.Min(digitsInt, digitsPerWord); i > 0; i-- { + for i := min(digitsInt, digitsPerWord); i > 0; i-- { y := x / 10 strIdx-- str[strIdx] = '0' + byte(x-y*10) @@ -841,7 +840,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err if to != d { copy(to.wordBuf[:], d.wordBuf[:]) to.negative = d.negative - to.digitsInt = int8(mathutil.Min(wordsInt, wordBufLen) * digitsPerWord) + to.digitsInt = int8(min(wordsInt, wordBufLen) * digitsPerWord) } if wordsFracTo > wordsFrac { idx := wordsInt + wordsFrac @@ -942,7 +941,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err frac = wordsFracTo * digitsPerWord err = ErrTruncated } - for toIdx = wordsInt + mathutil.Max(wordsFracTo, 0); toIdx > 0; toIdx-- { + for toIdx = wordsInt + max(wordsFracTo, 0); toIdx > 0; toIdx-- { if toIdx < wordBufLen { to.wordBuf[toIdx] = to.wordBuf[toIdx-1] } else { @@ -966,7 +965,7 @@ func (d *MyDecimal) Round(to *MyDecimal, frac int, roundMode RoundMode) (err err /* making 'zero' with the proper scale */ idx := wordsFracTo + 1 to.digitsInt = 1 - to.digitsFrac = int8(mathutil.Max(frac, 0)) + to.digitsFrac = int8(max(frac, 0)) to.negative = false for toIdx < idx { to.wordBuf[toIdx] = 0 @@ -1603,7 +1602,7 @@ func DecimalNeg(from *MyDecimal) *MyDecimal { // of `to` may be changed during evaluating. func DecimalAdd(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) if from1.negative == from2.negative { return doAdd(from1, from2, to) } @@ -1614,7 +1613,7 @@ func DecimalAdd(from1, from2, to *MyDecimal) error { // DecimalSub subs one decimal from another, sets the result to 'to'. 
func DecimalSub(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) if from1.negative == from2.negative { _, err := doSub(from1, from2, to) return err @@ -1650,7 +1649,7 @@ func doSub(from1, from2, to *MyDecimal) (cmp int, err error) { wordsFrac1 = digitsToWords(int(from1.digitsFrac)) wordsInt2 = digitsToWords(int(from2.digitsInt)) wordsFrac2 = digitsToWords(int(from2.digitsFrac)) - wordsFracTo = mathutil.Max(wordsFrac1, wordsFrac2) + wordsFracTo = max(wordsFrac1, wordsFrac2) start1 = 0 stop1 = wordsInt1 @@ -1815,8 +1814,8 @@ func doAdd(from1, from2, to *MyDecimal) error { wordsFrac1 = digitsToWords(int(from1.digitsFrac)) wordsInt2 = digitsToWords(int(from2.digitsInt)) wordsFrac2 = digitsToWords(int(from2.digitsFrac)) - wordsIntTo = mathutil.Max(wordsInt1, wordsInt2) - wordsFracTo = mathutil.Max(wordsFrac1, wordsFrac2) + wordsIntTo = max(wordsInt1, wordsInt2) + wordsFracTo = max(wordsFrac1, wordsFrac2) ) var x int32 @@ -1840,7 +1839,7 @@ func doAdd(from1, from2, to *MyDecimal) error { idxTo := wordsIntTo + wordsFracTo to.negative = from1.negative to.digitsInt = int8(wordsIntTo * digitsPerWord) - to.digitsFrac = mathutil.Max(from1.digitsFrac, from2.digitsFrac) + to.digitsFrac = max(from1.digitsFrac, from2.digitsFrac) if err != nil { if to.digitsFrac > int8(wordsFracTo*digitsPerWord) { @@ -1978,7 +1977,7 @@ func DecimalMul(from1, from2, to *MyDecimal) error { tmp1 = wordsIntTo tmp2 = wordsFracTo ) - to.resultFrac = mathutil.Min(from1.resultFrac+from2.resultFrac, mysql.MaxDecimalScale) + to.resultFrac = min(from1.resultFrac+from2.resultFrac, mysql.MaxDecimalScale) wordsIntTo, wordsFracTo, err = fixWordCntError(wordsIntTo, wordsFracTo) to.negative = from1.negative != from2.negative to.digitsFrac = from1.digitsFrac + from2.digitsFrac @@ -2093,7 +2092,7 @@ func DecimalMul(from1, from2, to *MyDecimal) error { // fracIncr - increment of fraction func DecimalDiv(from1, from2, to *MyDecimal, fracIncr int) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Min(from1.resultFrac+int8(fracIncr), mysql.MaxDecimalScale) + to.resultFrac = min(from1.resultFrac+int8(fracIncr), mysql.MaxDecimalScale) return doDivMod(from1, from2, to, nil, fracIncr) } @@ -2123,7 +2122,7 @@ DecimalMod does modulus of two decimals. 
*/ func DecimalMod(from1, from2, to *MyDecimal) error { from1, from2, to = validateArgs(from1, from2, to) - to.resultFrac = mathutil.Max(from1.resultFrac, from2.resultFrac) + to.resultFrac = max(from1.resultFrac, from2.resultFrac) return doDivMod(from1, from2, nil, to, 0) } @@ -2191,7 +2190,7 @@ func doDivMod(from1, from2, to, mod *MyDecimal, fracIncr int) error { // digitsFrac=max(frac1, frac2), as for subtraction // digitsInt=from2.digitsInt to.negative = from1.negative - to.digitsFrac = mathutil.Max(from1.digitsFrac, from2.digitsFrac) + to.digitsFrac = max(from1.digitsFrac, from2.digitsFrac) } else { wordsFracTo = digitsToWords(frac1 + frac2 + fracIncr) wordsIntTo, wordsFracTo, err = fixWordCntError(wordsIntTo, wordsFracTo) @@ -2356,7 +2355,7 @@ func doDivMod(from1, from2, to, mod *MyDecimal, fracIncr int) error { return ErrOverflow } stop1 = start1 + wordsIntTo + wordsFracTo - to.digitsInt = int8(mathutil.Min(wordsIntTo*digitsPerWord, int(from2.digitsInt))) + to.digitsInt = int8(min(wordsIntTo*digitsPerWord, int(from2.digitsInt))) } if wordsIntTo+wordsFracTo > wordBufLen { stop1 -= wordsIntTo + wordsFracTo - wordBufLen diff --git a/pkg/types/time.go b/pkg/types/time.go index 012f3c4e27df0..cadb6968cfa07 100644 --- a/pkg/types/time.go +++ b/pkg/types/time.go @@ -30,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/mathutil" @@ -453,7 +452,7 @@ func (t Time) FillNumber(dec *MyDecimal) { } // Convert converts t with type tp. -func (t Time) Convert(sc *stmtctx.StatementContext, tp uint8) (Time, error) { +func (t Time) Convert(ctx Context, tp uint8) (Time, error) { t1 := t if t.Type() == tp || t.IsZero() { t1.SetType(tp) @@ -461,7 +460,7 @@ func (t Time) Convert(sc *stmtctx.StatementContext, tp uint8) (Time, error) { } t1.SetType(tp) - err := t1.check(sc, nil) + err := t1.check(ctx, nil) return t1, errors.Trace(err) } @@ -490,9 +489,9 @@ func (t Time) Compare(o Time) int { // CompareString is like Compare, // but parses string to Time then compares. -func (t Time) CompareString(sc *stmtctx.StatementContext, str string) (int, error) { +func (t Time) CompareString(ctx Context, str string) (int, error) { // use MaxFsp to parse the string - o, err := ParseTime(sc, str, t.Type(), MaxFsp, nil) + o, err := ParseTime(ctx, str, t.Type(), MaxFsp, nil) if err != nil { return 0, errors.Trace(err) } @@ -507,7 +506,7 @@ func roundTime(t gotime.Time, fsp int) gotime.Time { } // RoundFrac rounds the fraction part of a time-type value according to `fsp`. -func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { +func (t Time) RoundFrac(ctx Context, fsp int) (Time, error) { if t.Type() == mysql.TypeDate || t.IsZero() { // date type has no fsp return t, nil @@ -524,13 +523,13 @@ func (t Time) RoundFrac(sc *stmtctx.StatementContext, fsp int) (Time, error) { } var nt CoreTime - if t1, err := t.GoTime(sc.TimeZone()); err == nil { + if t1, err := t.GoTime(ctx.Location()); err == nil { t1 = roundTime(t1, fsp) nt = FromGoTime(t1) } else { // Take the hh:mm:ss part out to avoid handle month or day = 0. 
hour, minute, second, microsecond := t.Hour(), t.Minute(), t.Second(), t.Microsecond() - t1 := gotime.Date(1, 1, 1, hour, minute, second, microsecond*1000, sc.TimeZone()) + t1 := gotime.Date(1, 1, 1, hour, minute, second, microsecond*1000, ctx.Location()) t2 := roundTime(t1, fsp) hour, minute, second = t2.Clock() microsecond = t2.Nanosecond() / 1000 @@ -674,18 +673,13 @@ func (t *Time) FromPackedUint(packed uint64) error { // check whether t matches valid Time format. // If allowZeroInDate is false, it returns ErrZeroDate when month or day is zero. // FIXME: See https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html#sqlmode_no_zero_in_date -func (t Time) check(sc *stmtctx.StatementContext, explicitTz *gotime.Location) error { - allowZeroInDate := false - allowInvalidDate := false - // We should avoid passing sc as nil here as far as possible. - if sc != nil { - allowZeroInDate = sc.IgnoreZeroInDate - allowInvalidDate = sc.AllowInvalidDate - } +func (t Time) check(ctx Context, explicitTz *gotime.Location) error { + allowZeroInDate := ctx.Flags().IgnoreZeroInDate() + allowInvalidDate := ctx.Flags().IgnoreInvalidDateErr() var err error switch t.Type() { case mysql.TypeTimestamp: - err = checkTimestampType(sc, t.coreTime, explicitTz) + err = checkTimestampType(ctx, t.coreTime, explicitTz) case mysql.TypeDatetime, mysql.TypeDate: err = checkDatetimeType(t.coreTime, allowZeroInDate, allowInvalidDate) } @@ -693,18 +687,18 @@ func (t Time) check(sc *stmtctx.StatementContext, explicitTz *gotime.Location) e } // Check if 't' is valid -func (t *Time) Check(sc *stmtctx.StatementContext) error { - return t.check(sc, nil) +func (t *Time) Check(ctx Context) error { + return t.check(ctx, nil) } // Sub subtracts t1 from t, returns a duration value. // Note that sub should not be done on different time types. -func (t *Time) Sub(sc *stmtctx.StatementContext, t1 *Time) Duration { +func (t *Time) Sub(ctx Context, t1 *Time) Duration { var duration gotime.Duration if t.Type() == mysql.TypeTimestamp && t1.Type() == mysql.TypeTimestamp { - a, err := t.GoTime(sc.TimeZone()) + a, err := t.GoTime(ctx.Location()) terror.Log(errors.Trace(err)) - b, err := t1.GoTime(sc.TimeZone()) + b, err := t1.GoTime(ctx.Location()) terror.Log(errors.Trace(err)) duration = a.Sub(b) } else { @@ -727,7 +721,7 @@ func (t *Time) Sub(sc *stmtctx.StatementContext, t1 *Time) Duration { } // Add adds d to t, returns the result time value. -func (t *Time) Add(sc *stmtctx.StatementContext, d Duration) (Time, error) { +func (t *Time) Add(ctx Context, d Duration) (Time, error) { seconds, microseconds, _ := calcTimeDurationDiff(t.coreTime, d) days := seconds / secondsIn24Hour year, month, day := getDateFromDaynr(uint(days)) @@ -747,7 +741,7 @@ func (t *Time) Add(sc *stmtctx.StatementContext, d Duration) (Time, error) { fsp = d.Fsp } ret := NewTime(tm.coreTime, t.Type(), fsp) - return ret, ret.Check(sc) + return ret, ret.Check(ctx) } // TimestampDiff returns t2 - t1 where t1 and t2 are date or datetime expressions. @@ -953,7 +947,7 @@ func splitDateTime(format string) (seps []string, fracStr string, hasTZ bool, tz } // See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-literals.html. 
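
With `check` now taking a `Context`, the zero-date and invalid-date switches move from `StatementContext` booleans (`IgnoreZeroInDate`, `AllowInvalidDate`) to flag bits queried via `ctx.Flags()`, and the defensive nil-`sc` fallback disappears. A sketch of the observable difference, assuming a zero day is rejected under `StrictFlags` exactly as the old strict path rejected it:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	strict := types.NewContext(types.StrictFlags, time.UTC, func(err error) {})
	_, err := types.ParseTime(strict, "2011-11-00 00:00:00", mysql.TypeDatetime, types.DefaultFsp, nil)
	fmt.Println(err != nil) // true: day 0 is rejected while the flag is unset

	lax := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {})
	v, err := types.ParseTime(lax, "2011-11-00 00:00:00", mysql.TypeDatetime, types.DefaultFsp, nil)
	fmt.Println(v, err) // 2011-11-00 00:00:00 <nil>
}
```
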
-func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { +func parseDatetime(ctx Context, str string, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { var ( year, month, day, hour, minute, second, deltaHour, deltaMinute int fracStr string @@ -964,7 +958,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo seps, fracStr, hasTZ, tzSign, tzHour, tzSep, tzMinute, truncatedOrIncorrect := splitDateTime(str) if truncatedOrIncorrect { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) } /* if we have timezone parsed, there are the following cases to be considered, however some of them are wrongly parsed, and we should consider absorb them back to seps. @@ -1045,12 +1039,12 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo l := len(seps[0]) // Values specified as numbers if isFloat { - numOfTime, err := StrToInt(sc.TypeCtxOrDefault(), seps[0], false) + numOfTime, err := StrToInt(ctx, seps[0], false) if err != nil { return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(DateTimeStr, str)) } - dateTime, err := ParseDatetimeFromNum(sc, numOfTime) + dateTime, err := ParseDatetimeFromNum(ctx, numOfTime) if err != nil { return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(DateTimeStr, str)) } @@ -1126,8 +1120,8 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo } truncatedOrIncorrect = err != nil } - if truncatedOrIncorrect && sc != nil { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) + if truncatedOrIncorrect { + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) err = nil } case 2: @@ -1150,9 +1144,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo // For case like `2020-05-28 23:59:59 00:00:00`, the seps should be > 6, the reluctant parts should be truncated. seps = seps[:6] // YYYY-MM-DD HH-MM-SS - if sc != nil { - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) - } + ctx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs("datetime", str)) err = scanTimeArgs(seps, &year, &month, &day, &hour, &minute, &second) hhmmss = true } @@ -1194,7 +1186,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo if explicitTz != nil { t1, err = tmp.GoTime(explicitTz) } else { - t1, err = tmp.GoTime(sc.TimeZone()) + t1, err = tmp.GoTime(ctx.Location()) } if err != nil { return ZeroDatetime, errors.Trace(err) @@ -1231,7 +1223,7 @@ func parseDatetime(sc *stmtctx.StatementContext, str string, fsp int, isFloat bo if explicitTz != nil { t1 = t1.In(explicitTz) } else { - t1 = t1.In(sc.TimeZone()) + t1 = t1.In(ctx.Location()) } tmp = FromGoTime(t1) } @@ -1500,24 +1492,24 @@ func (d Duration) ToNumber() *MyDecimal { // ConvertToTime converts duration to Time. // Tp is TypeDatetime, TypeTimestamp and TypeDate. 
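
Note what the `parseDatetime` hunks below delete: every `if sc != nil` guard around `AppendWarning`. A `Context` always carries a warning function, so the truncation paths can warn unconditionally. A sketch that captures the warning for the over-long literal named in the hunk's own comment (`2020-05-28 23:59:59 00:00:00`):

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	var warns []error
	ctx := types.NewContext(types.StrictFlags, time.UTC, func(err error) {
		warns = append(warns, err) // the sink the old nil-checks were guarding
	})

	// The reluctant trailing part is truncated with a warning, not an error.
	v, err := types.ParseTime(ctx, "2020-05-28 23:59:59 00:00:00", mysql.TypeDatetime, types.MaxFsp, nil)
	fmt.Println(v, err, len(warns)) // 2020-05-28 23:59:59.000000 <nil> 1
}
```
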
-func (d Duration) ConvertToTime(sc *stmtctx.StatementContext, tp uint8) (Time, error) { - year, month, day := gotime.Now().In(sc.TimeZone()).Date() +func (d Duration) ConvertToTime(ctx Context, tp uint8) (Time, error) { + year, month, day := gotime.Now().In(ctx.Location()).Date() datePart := FromDate(year, int(month), day, 0, 0, 0, 0) mixDateAndDuration(&datePart, d) t := NewTime(datePart, mysql.TypeDatetime, d.Fsp) - return t.Convert(sc, tp) + return t.Convert(ctx, tp) } // ConvertToTimeWithTimestamp converts duration to Time by system timestamp. // Tp is TypeDatetime, TypeTimestamp and TypeDate. -func (d Duration) ConvertToTimeWithTimestamp(sc *stmtctx.StatementContext, tp uint8, ts gotime.Time) (Time, error) { - year, month, day := ts.In(sc.TimeZone()).Date() +func (d Duration) ConvertToTimeWithTimestamp(ctx Context, tp uint8, ts gotime.Time) (Time, error) { + year, month, day := ts.In(ctx.Location()).Date() datePart := FromDate(year, int(month), day, 0, 0, 0, 0) mixDateAndDuration(&datePart, d) t := NewTime(datePart, mysql.TypeDatetime, d.Fsp) - return t.Convert(sc, tp) + return t.Convert(ctx, tp) } // RoundFrac rounds fractional seconds precision with new fsp and returns a new one. @@ -1559,9 +1551,9 @@ func (d Duration) Compare(o Duration) int { // CompareString is like Compare, // but parses str to Duration then compares. -func (d Duration) CompareString(sc *stmtctx.StatementContext, str string) (int, error) { +func (d Duration) CompareString(ctx Context, str string) (int, error) { // use MaxFsp to parse the string - o, _, err := ParseDuration(sc, str, MaxFsp) + o, _, err := ParseDuration(ctx, str, MaxFsp) if err != nil { return 0, err } @@ -1813,7 +1805,7 @@ func canFallbackToDateTime(str string) bool { // ParseDuration parses the time form a formatted string with a fractional seconds part, // returns the duration type Time value and bool to indicate whether the result is null. 
// See http://dev.mysql.com/doc/refman/5.7/en/fractional-seconds.html -func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, bool, error) { +func ParseDuration(ctx Context, str string, fsp int) (Duration, bool, error) { rest := strings.TrimSpace(str) d, isNull, err := matchDuration(rest, fsp) if err == nil { @@ -1823,7 +1815,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, return d, isNull, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } - datetime, err := ParseDatetime(sc, rest) + datetime, err := ParseDatetime(ctx, rest) if err != nil { return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } @@ -1833,7 +1825,7 @@ func ParseDuration(sc *stmtctx.StatementContext, str string, fsp int) (Duration, return ZeroDuration, true, ErrTruncatedWrongVal.GenWithStackByArgs("time", str) } - d, err = d.RoundFrac(fsp, sc.TimeZone()) + d, err = d.RoundFrac(fsp, ctx.Location()) return d, false, err } @@ -1868,7 +1860,7 @@ func splitDuration(t gotime.Duration) (sign int, hours int, minutes int, seconds var maxDaysInMonth = []int{31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31} -func getTime(sc *stmtctx.StatementContext, num, originNum int64, tp byte) (Time, error) { +func getTime(ctx Context, num, originNum int64, tp byte) (Time, error) { s1 := num / 1000000 s2 := num - s1*1000000 @@ -1888,14 +1880,14 @@ func getTime(sc *stmtctx.StatementContext, num, originNum int64, tp byte) (Time, return ZeroDatetime, errors.Trace(ErrWrongValue.GenWithStackByArgs(TimeStr, numStr)) } t := NewTime(ct, tp, DefaultFsp) - err := t.check(sc, nil) + err := t.check(ctx, nil) return t, errors.Trace(err) } // parseDateTimeFromNum parses date time from num. // See number_to_datetime function. // https://github.com/mysql/mysql-server/blob/5.7/sql-common/my_time.c -func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { +func parseDateTimeFromNum(ctx Context, num int64) (Time, error) { t := ZeroDate // Check zero. if num == 0 { @@ -1906,7 +1898,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // Check datetime type. if num >= 10000101000000 { t.SetType(mysql.TypeDatetime) - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check MMDD. @@ -1918,7 +1910,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDD, year: 2000-2069 if num <= (70-1)*10000+1231 { num = (num + 20000000) * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check YYMMDD. @@ -1930,13 +1922,13 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDD, year: 1970-1999 if num <= 991231 { num = (num + 19000000) * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Adjust hour/min/second. if num <= 99991231 { num = num * 1000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check MMDDHHMMSS. @@ -1951,7 +1943,7 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDDHHMMSS, 2000-2069 if num <= 69*10000000000+1231235959 { num = num + 20000000000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // Check YYYYMMDDHHMMSS. 
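
The number-parsing hunks keep MySQL's two-digit-year windows intact and only swap the context argument: per the comments in the hunk, `YYMMDD` values with `YY` from 00 to 69 map to 2000-2069, and 70 to 99 map to 1970-1999. A quick demonstration through the exported wrapper:

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	ctx := types.DefaultStmtNoWarningContext

	t1, _ := types.ParseDatetimeFromNum(ctx, 121231)   // YY=12 -> 2000-2069 window
	t2, _ := types.ParseDatetimeFromNum(ctx, 991231)   // YY=99 -> 1970-1999 window
	t3, _ := types.ParseDatetimeFromNum(ctx, 20121231) // four-digit year, taken as is

	fmt.Println(t1) // 2012-12-31 00:00:00
	fmt.Println(t2) // 1999-12-31 00:00:00
	fmt.Println(t3) // 2012-12-31 00:00:00
}
```
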
@@ -1963,10 +1955,10 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // YYMMDDHHMMSS, 1970-1999 if num <= 991231235959 { num = num + 19000000000000 - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } - return getTime(sc, num, originNum, t.Type()) + return getTime(ctx, num, originNum, t.Type()) } // ParseTime parses a formatted string with type tp and specific fsp. @@ -1981,56 +1973,56 @@ func parseDateTimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) // The valid timestamp range is from '1970-01-01 00:00:01.000000' to '2038-01-19 03:14:07.999999'. // The valid date range is from '1000-01-01' to '9999-12-31' // explicitTz is used to handle a data race of timeZone, refer to https://github.com/pingcap/tidb/issues/40710. It only works for timestamp now, be careful to use it! -func ParseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, explicitTz *gotime.Location) (Time, error) { - return parseTime(sc, str, tp, fsp, false, explicitTz) +func ParseTime(ctx Context, str string, tp byte, fsp int, explicitTz *gotime.Location) (Time, error) { + return parseTime(ctx, str, tp, fsp, false, explicitTz) } // ParseTimeFromFloatString is similar to ParseTime, except that it's used to parse a float converted string. -func ParseTimeFromFloatString(sc *stmtctx.StatementContext, str string, tp byte, fsp int) (Time, error) { +func ParseTimeFromFloatString(ctx Context, str string, tp byte, fsp int) (Time, error) { // MySQL compatibility: 0.0 should not be converted to null, see #11203 if len(str) >= 3 && str[:3] == "0.0" { return NewTime(ZeroCoreTime, tp, DefaultFsp), nil } - return parseTime(sc, str, tp, fsp, true, nil) + return parseTime(ctx, str, tp, fsp, true, nil) } -func parseTime(sc *stmtctx.StatementContext, str string, tp byte, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { +func parseTime(ctx Context, str string, tp byte, fsp int, isFloat bool, explicitTz *gotime.Location) (Time, error) { fsp, err := CheckFsp(fsp) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } - t, err := parseDatetime(sc, str, fsp, isFloat, explicitTz) + t, err := parseDatetime(ctx, str, fsp, isFloat, explicitTz) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } t.SetType(tp) - if err = t.check(sc, explicitTz); err != nil { + if err = t.check(ctx, explicitTz); err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } return t, nil } // ParseDatetime is a helper function wrapping ParseTime with datetime type and default fsp. -func ParseDatetime(sc *stmtctx.StatementContext, str string) (Time, error) { - return ParseTime(sc, str, mysql.TypeDatetime, GetFsp(str), nil) +func ParseDatetime(ctx Context, str string) (Time, error) { + return ParseTime(ctx, str, mysql.TypeDatetime, GetFsp(str), nil) } // ParseTimestamp is a helper function wrapping ParseTime with timestamp type and default fsp. -func ParseTimestamp(sc *stmtctx.StatementContext, str string) (Time, error) { - return ParseTime(sc, str, mysql.TypeTimestamp, GetFsp(str), nil) +func ParseTimestamp(ctx Context, str string) (Time, error) { + return ParseTime(ctx, str, mysql.TypeTimestamp, GetFsp(str), nil) } // ParseDate is a helper function wrapping ParseTime with date type. 
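
`ParseTimeFromFloatString` keeps its MySQL-compatibility quirk across the refactor: a string starting with `0.0` yields the zero time rather than NULL (see the #11203 reference in the hunk). A sketch under the new `Context`; the second input is an illustrative value, not one taken from the test suite:

```go
package main

import (
	"fmt"

	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	ctx := types.DefaultStmtNoWarningContext

	z, err := types.ParseTimeFromFloatString(ctx, "0.0", mysql.TypeDatetime, types.DefaultFsp)
	fmt.Println(z.IsZero(), err) // true <nil>: zero value, not NULL and not an error

	v, err := types.ParseTimeFromFloatString(ctx, "20121231113045.123", mysql.TypeDatetime, 3)
	fmt.Println(v, err) // 2012-12-31 11:30:45.123 <nil>
}
```
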
-func ParseDate(sc *stmtctx.StatementContext, str string) (Time, error) { +func ParseDate(ctx Context, str string) (Time, error) { // date has no fractional seconds precision - return ParseTime(sc, str, mysql.TypeDate, MinFsp, nil) + return ParseTime(ctx, str, mysql.TypeDate, MinFsp, nil) } // ParseTimeFromYear parse a `YYYY` formed year to corresponded Datetime type. // Note: the invoker must promise the `year` is in the range [MinYear, MaxYear]. -func ParseTimeFromYear(_ *stmtctx.StatementContext, year int64) (Time, error) { +func ParseTimeFromYear(year int64) (Time, error) { if year == 0 { return NewTime(ZeroCoreTime, mysql.TypeDate, DefaultFsp), nil } @@ -2041,11 +2033,11 @@ func ParseTimeFromYear(_ *stmtctx.StatementContext, year int64) (Time, error) { // ParseTimeFromNum parses a formatted int64, // returns the value which type is tp. -func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) (Time, error) { +func ParseTimeFromNum(ctx Context, num int64, tp byte, fsp int) (Time, error) { // MySQL compatibility: 0 should not be converted to null, see #11203 if num == 0 { zt := NewTime(ZeroCoreTime, tp, DefaultFsp) - if sc != nil && sc.InCreateOrAlterStmt && !sc.TypeFlags().TruncateAsWarning() && sc.NoZeroDate { + if !ctx.Flags().IgnoreZeroDateErr() { switch tp { case mysql.TypeTimestamp: return zt, ErrTruncatedWrongVal.GenWithStackByArgs(TimestampStr, "0") @@ -2062,33 +2054,33 @@ func ParseTimeFromNum(sc *stmtctx.StatementContext, num int64, tp byte, fsp int) return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } - t, err := parseDateTimeFromNum(sc, num) + t, err := parseDateTimeFromNum(ctx, num) if err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } t.SetType(tp) t.SetFsp(fsp) - if err := t.check(sc, nil); err != nil { + if err := t.check(ctx, nil); err != nil { return NewTime(ZeroCoreTime, tp, DefaultFsp), errors.Trace(err) } return t, nil } // ParseDatetimeFromNum is a helper function wrapping ParseTimeFromNum with datetime type and default fsp. -func ParseDatetimeFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { - return ParseTimeFromNum(sc, num, mysql.TypeDatetime, DefaultFsp) +func ParseDatetimeFromNum(ctx Context, num int64) (Time, error) { + return ParseTimeFromNum(ctx, num, mysql.TypeDatetime, DefaultFsp) } // ParseTimestampFromNum is a helper function wrapping ParseTimeFromNum with timestamp type and default fsp. -func ParseTimestampFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { - return ParseTimeFromNum(sc, num, mysql.TypeTimestamp, DefaultFsp) +func ParseTimestampFromNum(ctx Context, num int64) (Time, error) { + return ParseTimeFromNum(ctx, num, mysql.TypeTimestamp, DefaultFsp) } // ParseDateFromNum is a helper function wrapping ParseTimeFromNum with date type. -func ParseDateFromNum(sc *stmtctx.StatementContext, num int64) (Time, error) { +func ParseDateFromNum(ctx Context, num int64) (Time, error) { // date has no fractional seconds precision - return ParseTimeFromNum(sc, num, mysql.TypeDate, MinFsp) + return ParseTimeFromNum(ctx, num, mysql.TypeDate, MinFsp) } // TimeFromDays Converts a day number to a date. 
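
The most substantive consolidation sits in `ParseTimeFromNum` above: the old four-part guard (`sc != nil && sc.InCreateOrAlterStmt && !TruncateAsWarning && sc.NoZeroDate`) collapses into a single `ctx.Flags().IgnoreZeroDateErr()` lookup. A sketch of both sides of that flag; `WithIgnoreZeroDateErr` is an assumed setter name, inferred by analogy with the other `With*` setters in this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/pingcap/tidb/pkg/parser/mysql"
	"github.com/pingcap/tidb/pkg/types"
)

func main() {
	// The default statement flags tolerate num == 0, which maps to the zero time.
	z, err := types.ParseTimeFromNum(types.DefaultStmtNoWarningContext, 0, mysql.TypeDate, types.MinFsp)
	fmt.Println(z.IsZero(), err) // true <nil>

	// With the flag cleared (assumption: WithIgnoreZeroDateErr exists), the
	// guard in ParseTimeFromNum turns num == 0 into a hard truncation error.
	strict := types.NewContext(types.StrictFlags.WithIgnoreZeroDateErr(false), time.UTC, func(err error) {})
	_, err = types.ParseTimeFromNum(strict, 0, mysql.TypeDate, types.MinFsp)
	fmt.Println(err != nil) // true
}
```
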
@@ -2158,17 +2150,13 @@ func checkMonthDay(year, month, day int, allowInvalidDate bool) error { return nil } -func checkTimestampType(sc *stmtctx.StatementContext, t CoreTime, explicitTz *gotime.Location) error { +func checkTimestampType(ctx Context, t CoreTime, explicitTz *gotime.Location) error { if compareTime(t, ZeroCoreTime) == 0 { return nil } - if sc == nil { - return errors.New("statementContext is required during checkTimestampType") - } - var checkTime CoreTime - tz := sc.TimeZone() + tz := ctx.Location() if explicitTz != nil { tz = explicitTz } @@ -2650,16 +2638,16 @@ func IsDateFormat(format string) bool { } // ParseTimeFromInt64 parses mysql time value from int64. -func ParseTimeFromInt64(sc *stmtctx.StatementContext, num int64) (Time, error) { - return parseDateTimeFromNum(sc, num) +func ParseTimeFromInt64(ctx Context, num int64) (Time, error) { + return parseDateTimeFromNum(ctx, num) } // ParseTimeFromFloat64 parses mysql time value from float64. // It is used in scenarios that distinguish date and datetime, e.g., date_add/sub() with first argument being real. // For example, 20010203 parses to date (no HMS) and 20010203040506 parses to datetime (with HMS). -func ParseTimeFromFloat64(sc *stmtctx.StatementContext, f float64) (Time, error) { +func ParseTimeFromFloat64(ctx Context, f float64) (Time, error) { intPart := int64(f) - t, err := parseDateTimeFromNum(sc, intPart) + t, err := parseDateTimeFromNum(ctx, intPart) if err != nil { return ZeroTime, err } @@ -2676,13 +2664,13 @@ func ParseTimeFromFloat64(sc *stmtctx.StatementContext, f float64) (Time, error) // ParseTimeFromDecimal parses mysql time value from decimal. // It is used in scenarios that distinguish date and datetime, e.g., date_add/sub() with first argument being decimal. // For example, 20010203 parses to date (no HMS) and 20010203040506 parses to datetime (with HMS). -func ParseTimeFromDecimal(sc *stmtctx.StatementContext, dec *MyDecimal) (t Time, err error) { +func ParseTimeFromDecimal(ctx Context, dec *MyDecimal) (t Time, err error) { intPart, err := dec.ToInt() if err != nil && !terror.ErrorEqual(err, ErrTruncated) { return ZeroTime, err } - fsp := mathutil.Min(MaxFsp, int(dec.GetDigitsFrac())) - t, err = parseDateTimeFromNum(sc, intPart) + fsp := min(MaxFsp, int(dec.GetDigitsFrac())) + t, err = parseDateTimeFromNum(ctx, intPart) if err != nil { return ZeroTime, err } @@ -2894,7 +2882,7 @@ func abbrDayOfMonth(day int) string { // StrToDate converts date string according to format. // See https://dev.mysql.com/doc/refman/5.7/en/date-and-time-functions.html#function_date-format -func (t *Time) StrToDate(sc *stmtctx.StatementContext, date, format string) bool { +func (t *Time) StrToDate(typeCtx Context, date, format string) bool { ctx := make(map[string]int) var tm CoreTime success, warning := strToDate(&tm, date, format, ctx) @@ -2910,13 +2898,13 @@ func (t *Time) StrToDate(sc *stmtctx.StatementContext, date, format string) bool t.SetCoreTime(tm) t.SetType(mysql.TypeDatetime) - if t.check(sc, nil) != nil { + if t.check(typeCtx, nil) != nil { return false } if warning { // Only append this warning when success but still need warning. // Currently this only happens when `date` has extra characters at the end. - sc.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs(DateTimeStr, date)) + typeCtx.AppendWarning(ErrTruncatedWrongVal.GenWithStackByArgs(DateTimeStr, date)) } return true } @@ -3434,8 +3422,8 @@ func DateFSP(date string) (fsp int) { // DateTimeIsOverflow returns if this date is overflow. 
// See: https://dev.mysql.com/doc/refman/8.0/en/datetime.html -func DateTimeIsOverflow(sc *stmtctx.StatementContext, date Time) (bool, error) { - tz := sc.TimeZone() +func DateTimeIsOverflow(ctx Context, date Time) (bool, error) { + tz := ctx.Location() if tz == nil { logutil.BgLogger().Warn("use gotime.local because sc.timezone is nil") tz = gotime.Local diff --git a/pkg/types/time_test.go b/pkg/types/time_test.go index 67008aee63707..0af88d4aaf276 100644 --- a/pkg/types/time_test.go +++ b/pkg/types/time_test.go @@ -25,9 +25,7 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mock" "github.com/stretchr/testify/require" ) @@ -61,8 +59,10 @@ func TestTimeEncoding(t *testing.T) { } func TestDateTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + var warnings []error + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) { + warnings = append(warnings, err) + }) table := []struct { Input string Expect string @@ -116,7 +116,7 @@ func TestDateTime(t *testing.T) { } for _, test := range table { - v, err := types.ParseDatetime(sc, test.Input) + v, err := types.ParseDatetime(typeCtx, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -147,12 +147,12 @@ func TestDateTime(t *testing.T) { } for _, test := range fspTbl { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDatetime, test.Fsp, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } - v, _ := types.ParseTime(sc, "121231113045.9999999", mysql.TypeDatetime, 6, nil) + v, _ := types.ParseTime(typeCtx, "121231113045.9999999", mysql.TypeDatetime, 6, nil) require.Equal(t, 46, v.Second()) require.Equal(t, 0, v.Microsecond()) @@ -177,9 +177,9 @@ func TestDateTime(t *testing.T) { } for _, test := range errTable { - _, err := types.ParseDatetime(sc, test) - require.True(t, err != nil || sc.WarningCount() > 0) - sc.SetWarnings(nil) + _, err := types.ParseDatetime(typeCtx, test) + require.True(t, err != nil || len(warnings) > 0) + warnings = nil } } @@ -192,7 +192,7 @@ func TestTimestamp(t *testing.T) { } for _, test := range table { - v, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test.Input) + v, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -203,14 +203,13 @@ func TestTimestamp(t *testing.T) { } for _, test := range errTable { - _, err := types.ParseTimestamp(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test) + _, err := types.ParseTimestamp(types.DefaultStmtNoWarningContext, test) require.Error(t, err) } } func TestDate(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Expect string @@ -278,7 +277,7 @@ func TestDate(t *testing.T) { } for _, test := range table { - v, err := types.ParseDate(sc, test.Input) + v, err := types.ParseDate(typeCtx, test.Input) require.NoError(t, err) require.Equal(t, test.Expect, v.String()) } @@ -298,14 +297,13 @@ func TestDate(t *testing.T) { } for _, test := range errTable { - _, err := 
types.ParseDate(sc, test) + _, err := types.ParseDate(typeCtx, test) require.Error(t, err) } } func TestTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Expect string @@ -337,7 +335,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - duration, isNull, err := types.ParseDuration(sc, test.Input, types.MinFsp) + duration, isNull, err := types.ParseDuration(typeCtx, test.Input, types.MinFsp) require.NoError(t, err) require.False(t, isNull) require.Equal(t, test.Expect, duration.String()) @@ -353,7 +351,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - duration, _, err := types.ParseDuration(sc, test.Input, types.MaxFsp) + duration, _, err := types.ParseDuration(typeCtx, test.Input, types.MaxFsp) require.NoError(t, err) require.Equal(t, test.Expect, duration.String()) } @@ -368,7 +366,7 @@ func TestTime(t *testing.T) { } for _, test := range table { - duration, isNull, err := types.ParseDuration(sc, test.Input, types.MaxFsp) + duration, isNull, err := types.ParseDuration(typeCtx, test.Input, types.MaxFsp) require.False(t, isNull) require.True(t, types.ErrTruncatedWrongVal.Equal(err)) require.Equal(t, test.Expect, duration.String()) @@ -381,11 +379,11 @@ func TestTime(t *testing.T) { } for _, test := range errTable { - _, _, err := types.ParseDuration(sc, test, types.DefaultFsp) + _, _, err := types.ParseDuration(typeCtx, test, types.DefaultFsp) require.Error(t, err) } - duration, _, err := types.ParseDuration(sc, "4294967295 0:59:59", types.DefaultFsp) + duration, _, err := types.ParseDuration(typeCtx, "4294967295 0:59:59", types.DefaultFsp) require.Error(t, err) require.Equal(t, "838:59:59", duration.String()) @@ -428,15 +426,15 @@ func TestDurationAdd(t *testing.T) { {"00:00:00.099", 3, "00:00:00.001", 3, "00:00:00.100"}, } for _, test := range table { - duration, _, err := types.ParseDuration(nil, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, test.Input, test.Fsp) require.NoError(t, err) - ta, _, err := types.ParseDuration(nil, test.InputAdd, test.FspAdd) + ta, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, test.InputAdd, test.FspAdd) require.NoError(t, err) result, err := duration.Add(ta) require.NoError(t, err) require.Equal(t, test.Expect, result.String()) } - duration, _, err := types.ParseDuration(nil, "00:00:00", 0) + duration, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "00:00:00", 0) require.NoError(t, err) ta := new(types.Duration) result, err := duration.Add(*ta) @@ -444,15 +442,14 @@ func TestDurationAdd(t *testing.T) { require.Equal(t, "00:00:00", result.String()) duration = types.Duration{Duration: math.MaxInt64, Fsp: 0} - tatmp, _, err := types.ParseDuration(nil, "00:01:00", 0) + tatmp, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, "00:01:00", 0) require.NoError(t, err) _, err = duration.Add(tatmp) require.Error(t, err) } func TestDurationSub(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -464,9 +461,9 @@ func TestDurationSub(t *testing.T) { {"00:00:00", 0, "00:00:00.1", 1, "-00:00:00.1"}, } for _, test := range table { - duration, _, err := 
types.ParseDuration(sc, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.NoError(t, err) - ta, _, err := types.ParseDuration(sc, test.InputAdd, test.FspAdd) + ta, _, err := types.ParseDuration(typeCtx, test.InputAdd, test.FspAdd) require.NoError(t, err) result, err := duration.Sub(ta) require.NoError(t, err) @@ -475,8 +472,7 @@ func TestDurationSub(t *testing.T) { } func TestTimeFsp(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -495,7 +491,7 @@ func TestTimeFsp(t *testing.T) { } for _, test := range table { - duration, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + duration, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.NoError(t, err) require.Equal(t, test.Expect, duration.String()) } @@ -508,7 +504,7 @@ func TestTimeFsp(t *testing.T) { } for _, test := range errTable { - _, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + _, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.Error(t, err) } } @@ -575,13 +571,13 @@ func TestYear(t *testing.T) { } func TestCodec(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext // MySQL timestamp value doesn't allow month=0 or day=0. - _, err := types.ParseTimestamp(sc, "2016-12-00 00:00:00") + _, err := types.ParseTimestamp(typeCtx, "2016-12-00 00:00:00") require.Error(t, err) - t5, err := types.ParseTimestamp(sc, "2010-10-10 10:11:11") + t5, err := types.ParseTimestamp(typeCtx, "2010-10-10 10:11:11") require.NoError(t, err) _, err = t5.ToPackedUint() require.NoError(t, err) @@ -602,7 +598,7 @@ func TestCodec(t *testing.T) { require.NoError(t, err) require.Equal(t, types.ZeroDatetime.String(), t3.String()) - t5, err = types.ParseDatetime(nil, "0001-01-01 00:00:00") + t5, err = types.ParseDatetime(types.DefaultStmtNoWarningContext, "0001-01-01 00:00:00") require.NoError(t, err) packed, _ = t5.ToPackedUint() @@ -619,7 +615,7 @@ func TestCodec(t *testing.T) { } for _, test := range tbl { - v, err := types.ParseTime(sc, test, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, test, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) packed, _ = v.ToPackedUint() @@ -670,7 +666,7 @@ func TestParseTimeFromNum(t *testing.T) { for ith, test := range table { // testtypes.ParseDatetimeFromNum - t1, err := types.ParseDatetimeFromNum(nil, test.Input) + t1, err := types.ParseDatetimeFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectDateTimeError { require.Errorf(t, err, "%d", ith) } else { @@ -680,7 +676,7 @@ func TestParseTimeFromNum(t *testing.T) { require.Equal(t, test.ExpectDateTimeValue, t1.String()) // testtypes.ParseTimestampFromNum - t1, err = types.ParseTimestampFromNum(stmtctx.NewStmtCtxWithTimeZone(time.UTC), test.Input) + t1, err = types.ParseTimestampFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectTimeStampError { require.Error(t, err) } else { @@ -690,7 +686,7 @@ func TestParseTimeFromNum(t *testing.T) { require.Equal(t, test.ExpectTimeStampValue, t1.String()) // testtypes.ParseDateFromNum - t1, err = types.ParseDateFromNum(nil, test.Input) + t1, err = types.ParseDateFromNum(types.DefaultStmtNoWarningContext, test.Input) if test.ExpectDateTimeError { require.Error(t, err) @@ -703,11 +699,9 @@ func 
TestParseTimeFromNum(t *testing.T) { } func TestToNumber(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) tblDateTime := []struct { Input string Fsp int @@ -725,7 +719,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDateTime { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDatetime, test.Fsp, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.ToNumber().String()) } @@ -748,7 +742,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDate { - v, err := types.ParseTime(sc, test.Input, mysql.TypeDate, 0, nil) + v, err := types.ParseTime(typeCtx, test.Input, mysql.TypeDate, 0, nil) require.NoError(t, err) require.Equal(t, test.Expect, v.ToNumber().String()) } @@ -771,7 +765,7 @@ func TestToNumber(t *testing.T) { } for _, test := range tblDuration { - v, _, err := types.ParseDuration(sc, test.Input, test.Fsp) + v, _, err := types.ParseDuration(typeCtx, test.Input, test.Fsp) require.NoError(t, err) // now we can only changetypes.Duration's Fsp to check ToNumber with different Fsp require.Equal(t, test.Expect, v.ToNumber().String()) @@ -779,8 +773,7 @@ func TestToNumber(t *testing.T) { } func TestParseTimeFromFloatString(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Fsp int @@ -800,7 +793,7 @@ func TestParseTimeFromFloatString(t *testing.T) { } for _, test := range table { - v, err := types.ParseTimeFromFloatString(sc, test.Input, mysql.TypeDatetime, test.Fsp) + v, err := types.ParseTimeFromFloatString(typeCtx, test.Input, mysql.TypeDatetime, test.Fsp) if test.ExpectError { require.Error(t, err) } else { @@ -847,9 +840,7 @@ func TestParseFrac(t *testing.T) { } func TestRoundFrac(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true - sc.SetTimeZone(time.UTC) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) tbl := []struct { Input string Fsp int @@ -869,16 +860,16 @@ func TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - nv, err := v.RoundFrac(sc, tt.Fsp) + nv, err := v.RoundFrac(typeCtx, tt.Fsp) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } // test different time zone losAngelesTz, err := time.LoadLocation("America/Los_Angeles") require.NoError(t, err) - sc.SetTimeZone(losAngelesTz) + typeCtx = typeCtx.WithLocation(losAngelesTz) tbl = []struct { Input string Fsp int @@ -894,9 +885,9 @@ func TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) + v, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - nv, err := v.RoundFrac(sc, tt.Fsp) + nv, err := v.RoundFrac(typeCtx, tt.Fsp) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } @@ -915,9 +906,9 @@ func 
TestRoundFrac(t *testing.T) { } for _, tt := range tbl { - v, _, err := types.ParseDuration(sc, tt.Input, types.MaxFsp) + v, _, err := types.ParseDuration(typeCtx, tt.Input, types.MaxFsp) require.NoError(t, err) - nv, err := v.RoundFrac(tt.Fsp, sc.TimeZone()) + nv, err := v.RoundFrac(tt.Fsp, typeCtx.Location()) require.NoError(t, err) require.Equal(t, tt.Except, nv.String()) } @@ -939,10 +930,8 @@ func TestRoundFrac(t *testing.T) { } func TestConvert(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true losAngelesTz, _ := time.LoadLocation("America/Los_Angeles") - sc.SetTimeZone(losAngelesTz) + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), losAngelesTz, func(err error) {}) tbl := []struct { Input string Fsp int @@ -958,7 +947,7 @@ func TestConvert(t *testing.T) { } for _, tt := range tbl { - v, err := types.ParseTime(sc, tt.Input, mysql.TypeDatetime, tt.Fsp, nil) + v, err := types.ParseTime(typeCtx, tt.Input, mysql.TypeDatetime, tt.Fsp, nil) require.NoError(t, err) nv, err := v.ConvertToDuration() require.NoError(t, err) @@ -975,21 +964,21 @@ func TestConvert(t *testing.T) { {"1 11:30:45.999999", 0}, } // test different time zone. - sc.SetTimeZone(time.UTC) + typeCtx = typeCtx.WithLocation(time.UTC) for _, tt := range tblDuration { - v, _, err := types.ParseDuration(sc, tt.Input, tt.Fsp) + v, _, err := types.ParseDuration(typeCtx, tt.Input, tt.Fsp) require.NoError(t, err) - year, month, day := time.Now().In(sc.TimeZone()).Date() - n := time.Date(year, month, day, 0, 0, 0, 0, sc.TimeZone()) - t1, err := v.ConvertToTime(sc, mysql.TypeDatetime) + year, month, day := time.Now().In(typeCtx.Location()).Date() + n := time.Date(year, month, day, 0, 0, 0, 0, typeCtx.Location()) + t1, err := v.ConvertToTime(typeCtx, mysql.TypeDatetime) require.NoError(t, err) - t2, _ := t1.GoTime(sc.TimeZone()) + t2, _ := t1.GoTime(typeCtx.Location()) require.Equal(t, v.Duration, t2.Sub(n)) } } func TestCompare(t *testing.T) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext tbl := []struct { Arg1 string Arg2 string @@ -1003,17 +992,17 @@ func TestCompare(t *testing.T) { } for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - ret, err := v1.CompareString(nil, tt.Arg2) + ret, err := v1.CompareString(types.DefaultStmtNoWarningContext, tt.Arg2) require.NoError(t, err) require.Equal(t, tt.Ret, ret) } - v1, err := types.ParseTime(sc, "2011-10-10 11:11:11", mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, "2011-10-10 11:11:11", mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - res, err := v1.CompareString(nil, "Test should error") + res, err := v1.CompareString(types.DefaultStmtNoWarningContext, "Test should error") require.Error(t, err) require.Equal(t, 0, res) @@ -1028,10 +1017,10 @@ func TestCompare(t *testing.T) { } for _, tt := range tbl { - v1, _, err := types.ParseDuration(nil, tt.Arg1, types.MaxFsp) + v1, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, tt.Arg1, types.MaxFsp) require.NoError(t, err) - ret, err := v1.CompareString(nil, tt.Arg2) + ret, err := v1.CompareString(types.DefaultStmtNoWarningContext, tt.Arg2) require.NoError(t, err) require.Equal(t, tt.Ret, ret) } @@ -1052,7 +1041,7 @@ func TestDurationClock(t *testing.T) { } for _, tt := range tbl { - d, _, err := 
types.ParseDuration(stmtctx.NewStmtCtxWithTimeZone(time.UTC), tt.Input, types.MaxFsp) + d, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, tt.Input, types.MaxFsp) require.NoError(t, err) require.Equal(t, tt.Hour, d.Hour()) require.Equal(t, tt.Minute, d.Minute()) @@ -1163,15 +1152,15 @@ func TestTimeAdd(t *testing.T) { {"2017-08-21", "01:01:01.001", "2017-08-21 01:01:01.001"}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - dur, _, err := types.ParseDuration(sc, tt.Arg2, types.MaxFsp) + dur, _, err := types.ParseDuration(typeCtx, tt.Arg2, types.MaxFsp) require.NoError(t, err) - result, err := types.ParseTime(sc, tt.Ret, mysql.TypeDatetime, types.MaxFsp, nil) + result, err := types.ParseTime(typeCtx, tt.Ret, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - v2, err := v1.Add(sc, dur) + v2, err := v1.Add(typeCtx, dur) require.NoError(t, err) require.Equalf(t, 0, v2.Compare(result), "%v %v", v2.CoreTime(), result.CoreTime()) } @@ -1252,7 +1241,7 @@ func TestCheckTimestamp(t *testing.T) { } for _, tt := range tests { - validTimestamp := types.CheckTimestampTypeForTest(stmtctx.NewStmtCtxWithTimeZone(tt.tz), tt.input, nil) + validTimestamp := types.CheckTimestampTypeForTest(types.NewContext(types.StrictFlags, tt.tz, func(err error) {}), tt.input, nil) if tt.expectRetError { require.Errorf(t, validTimestamp, "For %s %s", tt.input, tt.tz) } else { @@ -1309,7 +1298,7 @@ func TestCheckTimestamp(t *testing.T) { } for _, tt := range tests { - validTimestamp := types.CheckTimestampTypeForTest(stmtctx.NewStmtCtxWithTimeZone(tt.tz), tt.input, nil) + validTimestamp := types.CheckTimestampTypeForTest(types.NewContext(types.StrictFlags, tt.tz, func(err error) {}), tt.input, nil) if tt.expectRetError { require.Errorf(t, validTimestamp, "For %s %s", tt.input, tt.tz) } else { @@ -1785,11 +1774,10 @@ func TestIsDateFormat(t *testing.T) { } func TestParseTimeFromInt64(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) input := int64(20190412140000) - output, err := types.ParseTimeFromInt64(sc, input) + output, err := types.ParseTimeFromInt64(typeCtx, input) require.NoError(t, err) require.Equal(t, types.DefaultFsp, output.Fsp()) require.Equal(t, mysql.TypeDatetime, output.Type()) @@ -1803,8 +1791,7 @@ func TestParseTimeFromInt64(t *testing.T) { } func TestParseTimeFromFloat64(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) cases := []struct { f float64 @@ -1829,7 +1816,7 @@ func TestParseTimeFromFloat64(t *testing.T) { } for _, c := range cases { - res, err := types.ParseTimeFromFloat64(sc, c.f) + res, err := types.ParseTimeFromFloat64(typeCtx, c.f) require.Equalf(t, c.t, res.Type(), "Type mismatch for case %v", c) require.Equalf(t, c.Y, res.Year(), "Year mismatch for case %v", c) require.Equalf(t, c.M, res.Month(), "Month mismatch for case %v", c) @@ -1847,8 +1834,7 @@ func TestParseTimeFromFloat64(t *testing.T) { } func TestParseTimeFromDecimal(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - 
sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) cases := []struct { d *types.MyDecimal @@ -1873,7 +1859,7 @@ func TestParseTimeFromDecimal(t *testing.T) { } for _, c := range cases { - res, err := types.ParseTimeFromDecimal(sc, c.d) + res, err := types.ParseTimeFromDecimal(typeCtx, c.d) require.Equalf(t, c.t, res.Type(), "Type mismatch for case %v", c) require.Equalf(t, c.Y, res.Year(), "Year mismatch for case %v", c) require.Equalf(t, c.M, res.Month(), "Month mismatch for case %v", c) @@ -1923,8 +1909,7 @@ func TestGetFracIndex(t *testing.T) { } func TestTimeOverflow(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - sc.IgnoreZeroInDate = true + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreZeroInDate(true), time.UTC, func(err error) {}) table := []struct { Input string Output bool @@ -1947,9 +1932,9 @@ func TestTimeOverflow(t *testing.T) { } for _, test := range table { - v, err := types.ParseDatetime(sc, test.Input) + v, err := types.ParseDatetime(typeCtx, test.Input) require.NoError(t, err) - isOverflow, err := types.DateTimeIsOverflow(sc, v) + isOverflow, err := types.DateTimeIsOverflow(typeCtx, v) require.NoError(t, err) require.Equal(t, test.Output, isOverflow) } @@ -1983,15 +1968,15 @@ func TestTimeSub(t *testing.T) { {"2019-04-12 18:20:00", "2019-04-12 14:00:00", "04:20:00"}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext for _, tt := range tbl { - v1, err := types.ParseTime(sc, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) + v1, err := types.ParseTime(typeCtx, tt.Arg1, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - v2, err := types.ParseTime(sc, tt.Arg2, mysql.TypeDatetime, types.MaxFsp, nil) + v2, err := types.ParseTime(typeCtx, tt.Arg2, mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) - dur, _, err := types.ParseDuration(sc, tt.Ret, types.MaxFsp) + dur, _, err := types.ParseDuration(typeCtx, tt.Ret, types.MaxFsp) require.NoError(t, err) - rec := v1.Sub(sc, &v2) + rec := v1.Sub(typeCtx, &v2) require.Equal(t, dur, rec) } } @@ -2016,12 +2001,11 @@ func TestCheckMonthDay(t *testing.T) { {types.FromDate(3200, 2, 29, 0, 0, 0, 0), true}, } - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - sc.AllowInvalidDate = false + typeCtx := types.NewContext(types.StrictFlags.WithIgnoreInvalidDateErr(false), time.UTC, func(err error) {}) for _, tt := range dates { v := types.NewTime(tt.date, mysql.TypeDate, types.DefaultFsp) - err := v.Check(sc) + err := v.Check(typeCtx) if tt.isValidDate { require.NoError(t, err) } else { @@ -2182,7 +2166,7 @@ func TestParseWithTimezone(t *testing.T) { }, } for ith, ca := range cases { - v, err := types.ParseTime(stmtctx.NewStmtCtxWithTimeZone(ca.sysTZ), ca.lit, mysql.TypeTimestamp, ca.fsp, nil) + v, err := types.ParseTime(types.NewContext(types.StrictFlags, ca.sysTZ, func(err error) {}), ca.lit, mysql.TypeTimestamp, ca.fsp, nil) require.NoErrorf(t, err, "tidb time parse misbehaved on %d", ith) if err != nil { continue @@ -2194,8 +2178,8 @@ func TestParseWithTimezone(t *testing.T) { } func TestMarshalTime(t *testing.T) { - sc := mock.NewContext().GetSessionVars().StmtCtx - v1, err := types.ParseTime(sc, "2017-01-18 01:01:01.123456", mysql.TypeDatetime, types.MaxFsp, nil) + typeCtx := types.DefaultStmtNoWarningContext + v1, err := types.ParseTime(typeCtx, "2017-01-18 01:01:01.123456", mysql.TypeDatetime, types.MaxFsp, nil) require.NoError(t, err) j, err := 
json.Marshal(v1) require.NoError(t, err) @@ -2215,11 +2199,11 @@ func BenchmarkFormat(b *testing.B) { } func BenchmarkTimeAdd(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - arg1, _ := types.ParseTime(sc, "2017-01-18", mysql.TypeDatetime, types.MaxFsp, nil) - arg2, _, _ := types.ParseDuration(sc, "12:30:59", types.MaxFsp) + typeCtx := types.DefaultStmtNoWarningContext + arg1, _ := types.ParseTime(typeCtx, "2017-01-18", mysql.TypeDatetime, types.MaxFsp, nil) + arg2, _, _ := types.ParseDuration(typeCtx, "12:30:59", types.MaxFsp) for i := 0; i < b.N; i++ { - _, err := arg1.Add(sc, arg2) + _, err := arg1.Add(typeCtx, arg2) if err != nil { b.Fatal(err) } @@ -2227,9 +2211,9 @@ func BenchmarkTimeAdd(b *testing.B) { } func BenchmarkTimeCompare(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) + typeCtx := types.DefaultStmtNoWarningContext mustParse := func(str string) types.Time { - t, err := types.ParseDatetime(sc, str) + t, err := types.ParseDatetime(typeCtx, str) if err != nil { b.Fatal(err) } @@ -2278,10 +2262,10 @@ func BenchmarkParseDateFormat(b *testing.B) { benchmarkDateFormat(b, "datetime repeated delimiters", "2011---12---13 14::15::16..123456") } -func benchmarkDatetimeFormat(b *testing.B, name string, sc *stmtctx.StatementContext, str string) { +func benchmarkDatetimeFormat(b *testing.B, name string, ctx types.Context, str string) { b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { - _, err := types.ParseDatetime(sc, str) + _, err := types.ParseDatetime(ctx, str) if err != nil { b.Fatal(err) } @@ -2290,23 +2274,23 @@ func benchmarkDatetimeFormat(b *testing.B, name string, sc *stmtctx.StatementCon } func BenchmarkParseDatetimeFormat(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - benchmarkDatetimeFormat(b, "datetime without timezone", sc, "2020-10-10T10:10:10") - benchmarkDatetimeFormat(b, "datetime with timezone", sc, "2020-10-10T10:10:10Z+08:00") + typeCtx := types.DefaultStmtNoWarningContext + benchmarkDatetimeFormat(b, "datetime without timezone", typeCtx, "2020-10-10T10:10:10") + benchmarkDatetimeFormat(b, "datetime with timezone", typeCtx, "2020-10-10T10:10:10Z+08:00") } -func benchmarkStrToDate(b *testing.B, name string, sc *stmtctx.StatementContext, str, format string) { +func benchmarkStrToDate(b *testing.B, name string, ctx types.Context, str, format string) { b.Run(name, func(b *testing.B) { for i := 0; i < b.N; i++ { var t types.Time - t.StrToDate(sc, str, format) + t.StrToDate(ctx, str, format) } }) } func BenchmarkStrToDate(b *testing.B) { - sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - benchmarkStrToDate(b, "strToDate yyyyMMdd hhmmss ffff", sc, "31/05/2016 12:34:56.1234", "%d/%m/%Y %H:%i:%S.%f") - benchmarkStrToDate(b, "strToDate %r ddMMyyyy", sc, "04:13:56 AM 13/05/2019", "%r %d/%c/%Y") - benchmarkStrToDate(b, "strToDate %T ddMMyyyy", sc, " 4:13:56 13/05/2019", "%T %d/%c/%Y") + typeCtx := types.DefaultStmtNoWarningContext + benchmarkStrToDate(b, "strToDate yyyyMMdd hhmmss ffff", typeCtx, "31/05/2016 12:34:56.1234", "%d/%m/%Y %H:%i:%S.%f") + benchmarkStrToDate(b, "strToDate %r ddMMyyyy", typeCtx, "04:13:56 AM 13/05/2019", "%r %d/%c/%Y") + benchmarkStrToDate(b, "strToDate %T ddMMyyyy", typeCtx, " 4:13:56 13/05/2019", "%T %d/%c/%Y") } diff --git a/pkg/types/context/truncate.go b/pkg/types/truncate.go similarity index 99% rename from pkg/types/context/truncate.go rename to pkg/types/truncate.go index 271c8ed4b1d16..67b531925b480 100644 --- a/pkg/types/context/truncate.go +++ b/pkg/types/truncate.go @@ -12,7 
+12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package context +package types import ( "github.com/pingcap/errors" diff --git a/pkg/util/chunk/BUILD.bazel b/pkg/util/chunk/BUILD.bazel index e8ee18cba64ea..8aa1fa4f47f36 100644 --- a/pkg/util/chunk/BUILD.bazel +++ b/pkg/util/chunk/BUILD.bazel @@ -30,7 +30,6 @@ go_library( "//pkg/util/encrypt", "//pkg/util/hack", "//pkg/util/logutil", - "//pkg/util/mathutil", "//pkg/util/memory", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", @@ -67,7 +66,6 @@ go_test( "//pkg/testkit/testsetup", "//pkg/types", "//pkg/util/collate", - "//pkg/util/mathutil", "//pkg/util/memory", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/pkg/util/chunk/alloc.go b/pkg/util/chunk/alloc.go index 306ca6283897a..ac40309a88c7c 100644 --- a/pkg/util/chunk/alloc.go +++ b/pkg/util/chunk/alloc.go @@ -18,7 +18,6 @@ import ( "math" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // Allocator is an interface defined to reduce object allocation. @@ -101,7 +100,7 @@ func (a *allocator) Alloc(fields []*types.FieldType, capacity, maxChunkSize int) } // Init the chunk fields. - chk.capacity = mathutil.Min(capacity, maxChunkSize) + chk.capacity = min(capacity, maxChunkSize) chk.requiredRows = maxChunkSize // Allocate the chunk columns from the pool column allocator. for _, f := range fields { diff --git a/pkg/util/chunk/chunk.go b/pkg/util/chunk/chunk.go index db1539ed187af..6242de8450633 100644 --- a/pkg/util/chunk/chunk.go +++ b/pkg/util/chunk/chunk.go @@ -19,7 +19,6 @@ import ( "github.com/pingcap/errors" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) var msgErrSelNotNil = "The selection vector of Chunk is not nil. Please file a bug to the TiDB Team" @@ -67,7 +66,7 @@ func NewChunkWithCapacity(fields []*types.FieldType, capacity int) *Chunk { func New(fields []*types.FieldType, capacity, maxChunkSize int) *Chunk { chk := &Chunk{ columns: make([]*Column, 0, len(fields)), - capacity: mathutil.Min(capacity, maxChunkSize), + capacity: min(capacity, maxChunkSize), // set the default value of requiredRows to maxChunkSize to let chk.IsFull() behave // like how we judge whether a chunk is full now, then the statement // "chk.NumRows() < maxChunkSize" @@ -325,7 +324,7 @@ func reCalcCapacity(c *Chunk, maxChunkSize int) int { if newCapacity == 0 { newCapacity = InitialCapacity } - return mathutil.Min(newCapacity, maxChunkSize) + return min(newCapacity, maxChunkSize) } // Capacity returns the capacity of the Chunk. diff --git a/pkg/util/chunk/chunk_test.go b/pkg/util/chunk/chunk_test.go index 2484f751b472a..7ff2292db7f89 100644 --- a/pkg/util/chunk/chunk_test.go +++ b/pkg/util/chunk/chunk_test.go @@ -26,7 +26,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/sessionctx/stmtctx" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -284,7 +283,7 @@ func TestChunkSizeControl(t *testing.T) { chk.Reset() for i := 1; i < maxChunkSize*2; i++ { chk.SetRequiredRows(i, maxChunkSize) - require.Equal(t, mathutil.Min(maxChunkSize, i), chk.RequiredRows()) + require.Equal(t, min(maxChunkSize, i), chk.RequiredRows()) } chk.SetRequiredRows(1, maxChunkSize). 
@@ -546,7 +545,7 @@ func TestGetDecimalDatum(t *testing.T) { decType.SetFlen(4) decType.SetDecimal(2) sc := stmtctx.NewStmtCtx() - decDatum, err := datum.ConvertTo(sc, decType) + decDatum, err := datum.ConvertTo(sc.TypeCtx(), decType) require.NoError(t, err) chk := NewChunkWithCapacity([]*types.FieldType{decType}, 32) diff --git a/pkg/util/chunk/codec.go b/pkg/util/chunk/codec.go index 3a64e48476e6f..324a90c255a25 100644 --- a/pkg/util/chunk/codec.go +++ b/pkg/util/chunk/codec.go @@ -21,7 +21,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // Codec is used to: @@ -152,7 +151,7 @@ func (*Codec) setAllNotNull(col *Column) { numNullBitmapBytes := (col.length + 7) / 8 col.nullBitmap = col.nullBitmap[:0] for i := 0; i < numNullBitmapBytes; { - numAppendBytes := mathutil.Min(numNullBitmapBytes-i, cap(allNotNullBitmap)) + numAppendBytes := min(numNullBitmapBytes-i, cap(allNotNullBitmap)) col.nullBitmap = append(col.nullBitmap, allNotNullBitmap[:numAppendBytes]...) i += numAppendBytes } diff --git a/pkg/util/chunk/list_test.go b/pkg/util/chunk/list_test.go index df6c29fc3591b..106b9ece6f438 100644 --- a/pkg/util/chunk/list_test.go +++ b/pkg/util/chunk/list_test.go @@ -22,7 +22,6 @@ import ( "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -164,7 +163,7 @@ func BenchmarkListGetRow(b *testing.B) { } rand.Seed(0) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < mathutil.Min(b.N, 10000); i++ { + for i := 0; i < min(b.N, 10000); i++ { ptrs = append(ptrs, RowPtr{ ChkIdx: rand.Uint32() % uint32(numChk), RowIdx: rand.Uint32() % uint32(numRow), diff --git a/pkg/util/chunk/mutrow_test.go b/pkg/util/chunk/mutrow_test.go index 82d0ce32adb82..3e3a2c902326c 100644 --- a/pkg/util/chunk/mutrow_test.go +++ b/pkg/util/chunk/mutrow_test.go @@ -34,7 +34,7 @@ func TestMutRow(t *testing.T) { val := zeroValForType(allTypes[i]) d := row.GetDatum(i, allTypes[i]) d2 := types.NewDatum(val) - cmp, err := d.Compare(sc, &d2, collate.GetCollator(allTypes[i].GetCollate())) + cmp, err := d.Compare(sc.TypeCtx(), &d2, collate.GetCollator(allTypes[i].GetCollate())) require.NoError(t, err) require.Equal(t, 0, cmp) } @@ -79,7 +79,7 @@ func TestMutRow(t *testing.T) { retTypes := []*types.FieldType{types.NewFieldType(mysql.TypeDuration)} chk := New(retTypes, 1, 1) - dur, _, err := types.ParseDuration(sc, "01:23:45", 0) + dur, _, err := types.ParseDuration(sc.TypeCtx(), "01:23:45", 0) require.NoError(t, err) chk.AppendDuration(0, dur) mutRow = MutRowFromTypes(retTypes) diff --git a/pkg/util/chunk/row_container.go b/pkg/util/chunk/row_container.go index 324a2edbb686b..ed23cf8d70528 100644 --- a/pkg/util/chunk/row_container.go +++ b/pkg/util/chunk/row_container.go @@ -573,7 +573,11 @@ func (c *SortedRowContainer) Sort() (ret error) { ret = nil defer func() { if r := recover(); r != nil { - ret = fmt.Errorf("%v", r) + if err, ok := r.(error); ok { + ret = err + } else { + ret = fmt.Errorf("%v", r) + } } }() if c.ptrM.rowPtrs != nil { diff --git a/pkg/util/chunk/row_in_disk_test.go b/pkg/util/chunk/row_in_disk_test.go index 933a30834a779..1c27456a94ea3 100644 --- a/pkg/util/chunk/row_in_disk_test.go +++ b/pkg/util/chunk/row_in_disk_test.go @@ -30,7 +30,6 @@ import ( "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/parser/mysql" "github.com/pingcap/tidb/pkg/types" - "github.com/pingcap/tidb/pkg/util/mathutil" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -121,7 +120,7 @@ func BenchmarkDataInDiskByRowsGetRow(b *testing.B) { } rand.Seed(0) ptrs := make([]RowPtr, 0, b.N) - for i := 0; i < mathutil.Min(b.N, 10000); i++ { + for i := 0; i < min(b.N, 10000); i++ { ptrs = append(ptrs, RowPtr{ ChkIdx: rand.Uint32() % uint32(numChk), RowIdx: rand.Uint32() % uint32(numRow), diff --git a/pkg/util/codec/codec_test.go b/pkg/util/codec/codec_test.go index 3875c32a65ab4..b223938662c23 100644 --- a/pkg/util/codec/codec_test.go +++ b/pkg/util/codec/codec_test.go @@ -520,13 +520,13 @@ func TestBytes(t *testing.T) { func parseTime(t *testing.T, s string) types.Time { sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - m, err := types.ParseTime(sc, s, mysql.TypeDatetime, types.DefaultFsp, nil) + m, err := types.ParseTime(sc.TypeCtx(), s, mysql.TypeDatetime, types.DefaultFsp, nil) require.NoError(t, err) return m } func parseDuration(t *testing.T, s string) types.Duration { - m, _, err := types.ParseDuration(nil, s, types.DefaultFsp) + m, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, s, types.DefaultFsp) require.NoError(t, err) return m } @@ -778,7 +778,7 @@ func TestDecimal(t *testing.T) { _, err = EncodeDecimal(nil, d, 12, 10) require.Truef(t, terror.ErrorEqual(err, types.ErrOverflow), "err %v", err) - sc.SetTypeFlags(types.StrictFlags.WithIgnoreTruncateErr(true)) + sc.SetTypeFlags(types.DefaultStmtFlags.WithIgnoreTruncateErr(true)) decimalDatum := types.NewDatum(d) decimalDatum.SetLength(20) decimalDatum.SetFrac(5) @@ -963,7 +963,7 @@ func TestDecodeOneToChunk(t *testing.T) { require.True(t, expect.IsNull()) } else { if got.Kind() != types.KindMysqlDecimal { - cmp, err := got.Compare(sc, &expect, collate.GetCollator(tp.GetCollate())) + cmp, err := got.Compare(sc.TypeCtx(), &expect, collate.GetCollator(tp.GetCollate())) require.NoError(t, err) require.Equalf(t, 0, cmp, "expect: %v, got %v", expect, got) } else { @@ -1090,7 +1090,7 @@ func TestDecodeRange(t *testing.T) { datums1, _, err := DecodeRange(rowData, len(datums), nil, nil) require.NoError(t, err) for i, datum := range datums1 { - cmp, err := datum.Compare(nil, &datums[i], collate.GetBinaryCollator()) + cmp, err := datum.Compare(types.DefaultStmtNoWarningContext, &datums[i], collate.GetBinaryCollator()) require.NoError(t, err) require.Equal(t, 0, cmp) } diff --git a/pkg/util/dbterror/ddl_terror.go b/pkg/util/dbterror/ddl_terror.go index de98d1b9912b1..67861cfec2d7b 100644 --- a/pkg/util/dbterror/ddl_terror.go +++ b/pkg/util/dbterror/ddl_terror.go @@ -484,6 +484,8 @@ var ( " Use the LPAD function to zero-pad numbers, or store the formatted numbers in a CHAR column.", ), nil), ) + // ErrCheckConstraintDupName is for duplicate check constraint names + ErrCheckConstraintDupName = ClassDDL.NewStd(mysql.ErrCheckConstraintDupName) ) // ReorgRetryableErrCodes is the error codes that are retryable for reorganization. 
diff --git a/pkg/util/dbterror/exeerrors/errors.go b/pkg/util/dbterror/exeerrors/errors.go index 9d3c602a2d781..9176a6b70cd81 100644 --- a/pkg/util/dbterror/exeerrors/errors.go +++ b/pkg/util/dbterror/exeerrors/errors.go @@ -60,6 +60,8 @@ var ( ErrInstanceScope = dbterror.ClassExecutor.NewStd(mysql.ErrInstanceScope) ErrSettingNoopVariable = dbterror.ClassExecutor.NewStd(mysql.ErrSettingNoopVariable) ErrLazyUniquenessCheckFailure = dbterror.ClassExecutor.NewStd(mysql.ErrLazyUniquenessCheckFailure) + ErrMemoryExceedForQuery = dbterror.ClassExecutor.NewStd(mysql.ErrMemoryExceedForQuery) + ErrMemoryExceedForInstance = dbterror.ClassExecutor.NewStd(mysql.ErrMemoryExceedForInstance) ErrBRIEBackupFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIEBackupFailed) ErrBRIERestoreFailed = dbterror.ClassExecutor.NewStd(mysql.ErrBRIERestoreFailed) diff --git a/pkg/util/dbutil/common.go b/pkg/util/dbutil/common.go index d6291b30729fd..0ab538e247371 100644 --- a/pkg/util/dbutil/common.go +++ b/pkg/util/dbutil/common.go @@ -551,7 +551,7 @@ func AnalyzeValuesFromBuckets(valueString string, cols []*model.ColumnInfo) ([]s if IsTimeTypeAndNeedDecode(col.GetType()) { // check if values[i] is already a time string sc := stmtctx.NewStmtCtxWithTimeZone(time.UTC) - _, err := types.ParseTime(sc, values[i], col.GetType(), types.MinFsp, nil) + _, err := types.ParseTime(sc.TypeCtx(), values[i], col.GetType(), types.MinFsp, nil) if err == nil { continue } diff --git a/pkg/util/memory/BUILD.bazel b/pkg/util/memory/BUILD.bazel index 0a1c6321a7a75..565b1a6567c74 100644 --- a/pkg/util/memory/BUILD.bazel +++ b/pkg/util/memory/BUILD.bazel @@ -17,7 +17,7 @@ go_library( "//pkg/util/cgroup", "//pkg/util/dbterror", "//pkg/util/logutil", - "//pkg/util/mathutil", + "//pkg/util/sqlkiller", "@com_github_pingcap_failpoint//:failpoint", "@com_github_pingcap_sysutil//:sysutil", "@com_github_shirou_gopsutil_v3//mem", @@ -40,7 +40,6 @@ go_test( "//pkg/errno", "//pkg/parser/terror", "//pkg/testkit/testsetup", - "//pkg/util/mathutil", "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], diff --git a/pkg/util/memory/action.go b/pkg/util/memory/action.go index 39a37be2a56ef..b4eb8513b168f 100644 --- a/pkg/util/memory/action.go +++ b/pkg/util/memory/action.go @@ -15,13 +15,13 @@ package memory import ( - "fmt" "sync" "sync/atomic" "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/util/dbterror" "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "go.uber.org/zap" ) @@ -63,21 +63,6 @@ func (a *actionWithPriority) GetPriority() int64 { return a.priority } -// ActionInvoker indicates the invoker of the Action. -type ActionInvoker byte - -const ( - // SingleQuery indicates the Action is invoked by a tidb_mem_quota_query. - SingleQuery ActionInvoker = iota - // Instance indicates the Action is invoked by a tidb_server_memory_limit. - Instance -) - -// ActionCareInvoker is the interface for the Actions which need to be aware of the invoker. -type ActionCareInvoker interface { - SetInvoker(invoker ActionInvoker) -} - // BaseOOMAction manages the fallback action for all Action. type BaseOOMAction struct { fallbackAction ActionOnExceed @@ -155,12 +140,12 @@ func (*LogOnExceed) GetPriority() int64 { // PanicOnExceed panics when memory usage exceeds memory quota. type PanicOnExceed struct { + Killer *sqlkiller.SQLKiller logHook func(uint64) BaseOOMAction - ConnID uint64 - mutex sync.Mutex // For synchronization. 
- acted bool - invoker ActionInvoker + ConnID uint64 + mutex sync.Mutex // For synchronization. + acted bool } // SetLogHook sets a hook for PanicOnExceed. @@ -183,10 +168,10 @@ func (a *PanicOnExceed) Action(t *Tracker) { } } a.acted = true - if a.invoker == SingleQuery { - panic(PanicMemoryExceedWarnMsg + WarnMsgSuffixForSingleQuery + fmt.Sprintf("[conn=%d]", a.ConnID)) + a.Killer.SendKillSignal(sqlkiller.QueryMemoryExceeded) + if err := a.Killer.HandleSignal(); err != nil { + panic(err) } - panic(PanicMemoryExceedWarnMsg + WarnMsgSuffixForInstance + fmt.Sprintf("[conn=%d]", a.ConnID)) } // GetPriority get the priority of the Action @@ -194,20 +179,6 @@ func (*PanicOnExceed) GetPriority() int64 { return DefPanicPriority } -// SetInvoker sets the invoker of the Action. -func (a *PanicOnExceed) SetInvoker(invoker ActionInvoker) { - a.invoker = invoker -} - var ( errMemExceedThreshold = dbterror.ClassUtil.NewStd(errno.ErrMemExceedThreshold) ) - -const ( - // PanicMemoryExceedWarnMsg represents the panic message when out of memory quota. - PanicMemoryExceedWarnMsg string = "Your query has been cancelled due to exceeding the allowed memory limit" - // WarnMsgSuffixForSingleQuery represents the suffix of the warning message when out of memory quota for a single query. - WarnMsgSuffixForSingleQuery string = " for a single SQL query. Please try narrowing your query scope or increase the tidb_mem_quota_query limit and try again." - // WarnMsgSuffixForInstance represents the suffix of the warning message when out of memory quota for the tidb-server instance. - WarnMsgSuffixForInstance string = " for the tidb-server instance and this query is currently using the most memory. Please try narrowing your query scope or increase the tidb_server_memory_limit and try again." -) diff --git a/pkg/util/memory/meminfo.go b/pkg/util/memory/meminfo.go index 8d0e25bcb759e..0d7bd68a2bcbe 100644 --- a/pkg/util/memory/meminfo.go +++ b/pkg/util/memory/meminfo.go @@ -22,8 +22,9 @@ import ( "github.com/pingcap/sysutil" "github.com/pingcap/tidb/pkg/parser/terror" "github.com/pingcap/tidb/pkg/util/cgroup" - "github.com/pingcap/tidb/pkg/util/mathutil" + "github.com/pingcap/tidb/pkg/util/logutil" "github.com/shirou/gopsutil/v3/mem" + "go.uber.org/zap" ) // MemTotal returns the total amount of RAM on this system @@ -51,6 +52,10 @@ func MemTotalNormal() (uint64, error) { if time.Since(t) < 60*time.Second { return total, nil } + return memTotalNormal() +} + +func memTotalNormal() (uint64, error) { v, err := mem.VirtualMemory() if err != nil { return 0, err @@ -116,7 +121,7 @@ func MemTotalCGroup() (uint64, error) { if err != nil { return 0, err } - memo = mathutil.Min(v.Total, memo) + memo = min(v.Total, memo) memLimit.set(memo, time.Now()) return memo, nil } @@ -135,11 +140,12 @@ func MemUsedCGroup() (uint64, error) { if err != nil { return 0, err } - memo = mathutil.Min(v.Used, memo) + memo = min(v.Used, memo) memUsage.set(memo, time.Now()) return memo, nil } +// init is used both by tests and by normal initialization. func init() { if cgroup.InContainer() { MemTotal = MemTotalCGroup @@ -164,6 +170,37 @@ func init() { terror.MustNil(err) } +// InitMemoryHook initializes the memory hook. +// It works around the problem that TiDB cannot read the cgroup memory limit under systemd: +// when we are not running in a container, we compare the cgroup memory limit with the physical memory, +// and if the cgroup limit is smaller, we use the cgroup memory hook.
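+//
+// Illustrative call site (the bootstrap function name is a placeholder, not
+// an API of this package); the hook is meant to be installed once at server
+// startup, before any memory totals are consumed:
+//
+//	func bootstrap() {
+//		memory.InitMemoryHook() // decide between cgroup and physical readers
+//		if total, err := memory.MemTotal(); err == nil {
+//			logutil.BgLogger().Info("usable memory", zap.Uint64("total", total))
+//		}
+//	}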
+func InitMemoryHook() { + if cgroup.InContainer() { + logutil.BgLogger().Info("use cgroup memory hook because TiDB is in the container") + return + } + cgroupValue, err := cgroup.GetMemoryLimit() + if err != nil { + return + } + physicalValue, err := memTotalNormal() + if err != nil { + return + } + if physicalValue > cgroupValue && cgroupValue != 0 { + MemTotal = MemTotalCGroup + MemUsed = MemUsedCGroup + sysutil.RegisterGetMemoryCapacity(MemTotalCGroup) + logutil.BgLogger().Info("use cgroup memory hook", zap.Int64("cgroupMemorySize", int64(cgroupValue)), zap.Int64("physicalMemorySize", int64(physicalValue))) + } else { + logutil.BgLogger().Info("use physical memory hook", zap.Int64("cgroupMemorySize", int64(cgroupValue)), zap.Int64("physicalMemorySize", int64(physicalValue))) + } + _, err = MemTotal() + terror.MustNil(err) + _, err = MemUsed() + terror.MustNil(err) +} + // InstanceMemUsed returns the memory usage of this TiDB server func InstanceMemUsed() (uint64, error) { used, t := serverMemUsage.get() diff --git a/pkg/util/memory/tracker.go b/pkg/util/memory/tracker.go index 5b8d8cf101122..e60987066b667 100644 --- a/pkg/util/memory/tracker.go +++ b/pkg/util/memory/tracker.go @@ -25,9 +25,8 @@ import ( "time" "github.com/pingcap/tidb/pkg/metrics" - "github.com/pingcap/tidb/pkg/util/logutil" + "github.com/pingcap/tidb/pkg/util/sqlkiller" atomicutil "go.uber.org/atomic" - "go.uber.org/zap" ) // TrackMemWhenExceeds is the threshold when memory usage needs to be tracked. @@ -76,6 +75,7 @@ type Tracker struct { bytesLimit atomic.Value actionMuForHardLimit actionMu actionMuForSoftLimit actionMu + Killer *sqlkiller.SQLKiller mu struct { // The children memory trackers. If the Tracker is the Global Tracker, like executor.GlobalDiskUsageTracker, // we wouldn't maintain its children in order to avoiding mutex contention. @@ -92,10 +92,8 @@ type Tracker struct { bytesReleased int64 // Released bytes. maxConsumed atomicutil.Int64 // max number of bytes consumed during execution. SessionID atomicutil.Uint64 // SessionID indicates the sessionID the tracker is bound. 
- NeedKill atomic.Bool // NeedKill indicates whether this session need kill because OOM - NeedKillReceived sync.Once - IsRootTrackerOfSess bool // IsRootTrackerOfSess indicates whether this tracker is bound for session - isGlobal bool // isGlobal indicates whether this tracker is global tracker + IsRootTrackerOfSess bool // IsRootTrackerOfSess indicates whether this tracker is bound for session + isGlobal bool // isGlobal indicates whether this tracker is global tracker } type actionMu struct { @@ -321,8 +319,7 @@ func (t *Tracker) Detach() { parent.actionMuForSoftLimit.Lock() parent.actionMuForSoftLimit.actionOnExceed = nil parent.actionMuForSoftLimit.Unlock() - parent.NeedKill.Store(false) - parent.NeedKillReceived = sync.Once{} + parent.Killer.Reset() } parent.remove(t) t.mu.Lock() @@ -441,31 +438,7 @@ func (t *Tracker) Consume(bs int64) { } } - tryActionLastOne := func(mu *actionMu, tracker *Tracker) { - mu.Lock() - defer mu.Unlock() - if currentAction := mu.actionOnExceed; currentAction != nil { - for nextAction := currentAction.GetFallback(); nextAction != nil; { - currentAction = nextAction - nextAction = currentAction.GetFallback() - } - if action, ok := currentAction.(ActionCareInvoker); ok { - action.SetInvoker(Instance) - } - currentAction.Action(tracker) - } - } - if bs > 0 && sessionRootTracker != nil { - // Kill the Top1 session - if sessionRootTracker.NeedKill.Load() { - sessionRootTracker.NeedKillReceived.Do( - func() { - logutil.BgLogger().Warn("global memory controller, NeedKill signal is received successfully", - zap.Uint64("conn", sessionRootTracker.SessionID.Load())) - }) - tryActionLastOne(&sessionRootTracker.actionMuForHardLimit, sessionRootTracker) - } // Update the Top1 session memUsage := sessionRootTracker.BytesConsumed() limitSessMinSize := ServerMemoryLimitSessMinSize.Load() @@ -480,6 +453,13 @@ func (t *Tracker) Consume(bs int64) { } } + if bs > 0 && sessionRootTracker != nil { + err := sessionRootTracker.Killer.HandleSignal() + if err != nil { + panic(err) + } + } + if bs > 0 && rootExceed != nil { tryAction(&rootExceed.actionMuForHardLimit, rootExceed) } diff --git a/pkg/util/memory/tracker_test.go b/pkg/util/memory/tracker_test.go index ce6d3e3856031..c244b20ad28d2 100644 --- a/pkg/util/memory/tracker_test.go +++ b/pkg/util/memory/tracker_test.go @@ -27,7 +27,6 @@ import ( "github.com/pingcap/tidb/pkg/errno" "github.com/pingcap/tidb/pkg/parser/terror" - "github.com/pingcap/tidb/pkg/util/mathutil" "github.com/stretchr/testify/require" ) @@ -401,7 +400,7 @@ func TestMaxConsumed(t *testing.T) { } consumed += b tracker.Consume(b) - maxConsumed = mathutil.Max(maxConsumed, consumed) + maxConsumed = max(maxConsumed, consumed) require.Equal(t, consumed, r.BytesConsumed()) require.Equal(t, maxConsumed, r.MaxConsumed()) diff --git a/pkg/util/pdapi/const.go b/pkg/util/pdapi/const.go index d63f660beec46..243c297942a35 100644 --- a/pkg/util/pdapi/const.go +++ b/pkg/util/pdapi/const.go @@ -14,17 +14,86 @@ package pdapi +import ( + "fmt" + "time" +) + // The following constants are the APIs of PD server. 
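+// Parameterized endpoints are produced by the helper functions that follow
+// the constants, for example (pdAddr is an assumed placeholder):
+//
+//	url := pdAddr + StoreByID(1) // ".../pd/api/v1/store/1"
+//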
const ( - HotRead = "/pd/api/v1/hotspot/regions/read" - HotWrite = "/pd/api/v1/hotspot/regions/write" - HotHistory = "/pd/api/v1/hotspot/regions/history" - Regions = "/pd/api/v1/regions" - RegionByID = "/pd/api/v1/region/id" - StoreRegions = "/pd/api/v1/regions/store" - Stores = "/pd/api/v1/stores" - Status = "/pd/api/v1/status" - Config = "/pd/api/v1/config" - ScanRegions = "/pd/api/v1/regions/key" - RegionKey = "/pd/api/v1/region/key" + HotRead = "/pd/api/v1/hotspot/regions/read" + HotWrite = "/pd/api/v1/hotspot/regions/write" + HotHistory = "/pd/api/v1/hotspot/regions/history" + Regions = "/pd/api/v1/regions" + StoreRegions = "/pd/api/v1/regions/store" + ScanRegions = "/pd/api/v1/regions/key" + EmptyRegions = "/pd/api/v1/regions/check/empty-region" + AccelerateSchedule = "/pd/api/v1/regions/accelerate-schedule" + RegionByID = "/pd/api/v1/region/id" + RegionByKey = "/pd/api/v1/region/key" + store = "/pd/api/v1/store" + Stores = "/pd/api/v1/stores" + Status = "/pd/api/v1/status" + RegionStats = "/pd/api/v1/stats/region" + Version = "/pd/api/v1/version" + Config = "/pd/api/v1/config" + ClusterVersion = "/pd/api/v1/config/cluster-version" + ScheduleConfig = "/pd/api/v1/config/schedule" + ReplicateConfig = "/pd/api/v1/config/replicate" + PlacementRule = "/pd/api/v1/config/rule" + PlacementRules = "/pd/api/v1/config/rules" + PlacementRulesGroup = "/pd/api/v1/config/rules/group" + RegionLabelRule = "/pd/api/v1/config/region-label/rule" + Schedulers = "/pd/api/v1/schedulers" + scatterRangeScheduler = "/pd/api/v1/schedulers/scatter-range-" + ResetTS = "/pd/api/v1/admin/reset-ts" + BaseAllocID = "/pd/api/v1/admin/base-alloc-id" + SnapshotRecoveringMark = "/pd/api/v1/admin/cluster/markers/snapshot-recovering" + MinResolvedTS = "/pd/api/v1/min-resolved-ts" + PProfProfile = "/pd/api/v1/debug/pprof/profile" + PProfHeap = "/pd/api/v1/debug/pprof/heap" + PProfMutex = "/pd/api/v1/debug/pprof/mutex" + PProfAllocs = "/pd/api/v1/debug/pprof/allocs" + PProfBlock = "/pd/api/v1/debug/pprof/block" + PProfGoroutine = "/pd/api/v1/debug/pprof/goroutine" ) + +// ConfigWithTTLSeconds returns the config API with the TTL seconds parameter. +func ConfigWithTTLSeconds(ttlSeconds float64) string { + return fmt.Sprintf("%s?ttlSecond=%.0f", Config, ttlSeconds) +} + +// StoreByID returns the store API with store ID parameter. +func StoreByID(id uint64) string { + return fmt.Sprintf("%s/%d", store, id) +} + +// StoreLabelByID returns the store label API with store ID parameter. +func StoreLabelByID(id uint64) string { + return fmt.Sprintf("%s/%d/label", store, id) +} + +// RegionStatsByStartEndKey returns the region stats API with start key and end key parameters. +func RegionStatsByStartEndKey(startKey, endKey string) string { + return fmt.Sprintf("%s?start_key=%s&end_key=%s", RegionStats, startKey, endKey) +} + +// SchedulerByName returns the scheduler API with the given scheduler name. +func SchedulerByName(name string) string { + return fmt.Sprintf("%s/%s", Schedulers, name) +} + +// ScatterRangeSchedulerWithName returns the scatter range scheduler API with name parameter. +func ScatterRangeSchedulerWithName(name string) string { + return fmt.Sprintf("%s%s", scatterRangeScheduler, name) +} + +// PProfProfileAPIWithInterval returns the pprof profile API with interval parameter. +func PProfProfileAPIWithInterval(interval time.Duration) string { + return fmt.Sprintf("%s?seconds=%d", PProfProfile, interval/time.Second) +} + +// PProfGoroutineWithDebugLevel returns the pprof goroutine API with debug level parameter. 
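+// For example, debug level 2 yields
+//
+//	PProfGoroutineWithDebugLevel(2) // "/pd/api/v1/debug/pprof/goroutine?debug=2"
+//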
+func PProfGoroutineWithDebugLevel(level int) string { + return fmt.Sprintf("%s?debug=%d", PProfGoroutine, level) +} diff --git a/pkg/util/profile/flamegraph_test.go b/pkg/util/profile/flamegraph_test.go index 02cb46f8859a5..71ad775b75fda 100644 --- a/pkg/util/profile/flamegraph_test.go +++ b/pkg/util/profile/flamegraph_test.go @@ -92,7 +92,7 @@ func TestProfileToDatum(t *testing.T) { comment = fmt.Sprintf("row %2d, actual (%s), expected (%s)", i, rowStr, expectStr) equal := true for j, r := range row { - v, err := r.Compare(nil, &datums[i][j], collate.GetBinaryCollator()) + v, err := r.Compare(types.DefaultStmtNoWarningContext, &datums[i][j], collate.GetBinaryCollator()) if v != 0 || err != nil { equal = false break diff --git a/pkg/util/ranger/BUILD.bazel b/pkg/util/ranger/BUILD.bazel index b0e45e7cfca1d..935274ab75cf0 100644 --- a/pkg/util/ranger/BUILD.bazel +++ b/pkg/util/ranger/BUILD.bazel @@ -30,7 +30,6 @@ go_library( "//pkg/util/codec", "//pkg/util/collate", "//pkg/util/dbterror", - "//pkg/util/mathutil", "@com_github_pingcap_errors//:errors", ], ) diff --git a/pkg/util/ranger/bench_test.go b/pkg/util/ranger/bench_test.go index 69deaab02ddbb..3d5f2470fbc7c 100644 --- a/pkg/util/ranger/bench_test.go +++ b/pkg/util/ranger/bench_test.go @@ -112,7 +112,7 @@ WHERE err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(b, err) ctx := context.Background() - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(b, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() diff --git a/pkg/util/ranger/detacher.go b/pkg/util/ranger/detacher.go index 954bbcee5254e..b9fe83d0d5d11 100644 --- a/pkg/util/ranger/detacher.go +++ b/pkg/util/ranger/detacher.go @@ -28,7 +28,6 @@ import ( "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/chunk" "github.com/pingcap/tidb/pkg/util/collate" - "github.com/pingcap/tidb/pkg/util/mathutil" ) // detachColumnCNFConditions detaches the condition for calculating range from the other conditions. @@ -207,8 +206,8 @@ func getCNFItemRangeResult(sctx sessionctx.Context, rangeResult *DetachRangeResu maxColNum = len(ran.LowVal) minColNum = len(ran.LowVal) } else { - maxColNum = mathutil.Max(maxColNum, len(ran.LowVal)) - minColNum = mathutil.Min(minColNum, len(ran.LowVal)) + maxColNum = max(maxColNum, len(ran.LowVal)) + minColNum = min(minColNum, len(ran.LowVal)) } } if minColNum != maxColNum { @@ -545,7 +544,7 @@ func allSinglePoints(sc *stmtctx.StatementContext, points []*point) []*point { return nil } // Since the point's collations are equal to the column's collation, we can use any of them. - cmp, err := left.value.Compare(sc, &right.value, collate.GetCollator(left.value.Collation())) + cmp, err := left.value.Compare(sc.TypeCtx(), &right.value, collate.GetCollator(left.value.Collation())) if err != nil || cmp != 0 { return nil } @@ -832,7 +831,7 @@ func isSameValue(sc *stmtctx.StatementContext, lhs, rhs *valueInfo) (bool, error return false, nil } // binary collator may not the best choice, but it can make sure the result is correct. 
- cmp, err := lhs.value.Compare(sc, rhs.value, collate.GetBinaryCollator()) + cmp, err := lhs.value.Compare(sc.TypeCtx(), rhs.value, collate.GetBinaryCollator()) if err != nil { return false, err } diff --git a/pkg/util/ranger/points.go b/pkg/util/ranger/points.go index 6830b953dbf0c..487c885c749b4 100644 --- a/pkg/util/ranger/points.go +++ b/pkg/util/ranger/points.go @@ -108,7 +108,7 @@ func rangePointLess(sc *stmtctx.StatementContext, a, b *point, collator collate. if a.value.Kind() == types.KindMysqlEnum && b.value.Kind() == types.KindMysqlEnum { return rangePointEnumLess(sc, a, b) } - cmp, err := a.value.Compare(sc, &b.value, collator) + cmp, err := a.value.Compare(sc.TypeCtx(), &b.value, collator) if cmp != 0 { return cmp < 0, nil } @@ -254,11 +254,11 @@ func (r *builder) buildFromBinOp(expr *expression.ScalarFunction) []*point { // If the original value is adjusted, we need to change the condition. // For example, col < 2156. Since the max year is 2155, 2156 is changed to 2155. // col < 2155 is wrong. It should be col <= 2155. - preValue, err1 := value.ToInt64(r.sc) + preValue, err1 := value.ToInt64(r.sc.TypeCtx()) if err1 != nil { return err1 } - *value, err = value.ConvertToMysqlYear(r.sc, col.RetType) + *value, err = value.ConvertToMysqlYear(r.sc.TypeCtx(), col.RetType) if errors.ErrorEqual(err, types.ErrWarnDataOutOfRange) { // Keep err for EQ and NE. switch *op { @@ -473,7 +473,7 @@ func handleEnumFromBinOp(sc *stmtctx.StatementContext, ft *types.FieldType, val } d := types.NewCollateMysqlEnumDatum(tmpEnum, ft.GetCollate()) - if v, err := d.Compare(sc, &val, collate.GetCollator(ft.GetCollate())); err == nil { + if v, err := d.Compare(sc.TypeCtx(), &val, collate.GetCollator(ft.GetCollate())); err == nil { switch op { case ast.LT: if v < 0 { @@ -585,7 +585,7 @@ func (r *builder) buildFromIn(expr *expression.ScalarFunction) ([]*point, bool) err = parseErr } default: - dt, err = dt.ConvertTo(r.sc, expr.GetArgs()[0].GetType()) + dt, err = dt.ConvertTo(r.sc.TypeCtx(), expr.GetArgs()[0].GetType()) } if err != nil { @@ -594,7 +594,7 @@ func (r *builder) buildFromIn(expr *expression.ScalarFunction) ([]*point, bool) } } if expr.GetArgs()[0].GetType().GetType() == mysql.TypeYear { - dt, err = dt.ConvertToMysqlYear(r.sc, expr.GetArgs()[0].GetType()) + dt, err = dt.ConvertToMysqlYear(r.sc.TypeCtx(), expr.GetArgs()[0].GetType()) if err != nil { // in (..., an impossible value (not valid year), ...), the range is empty, so skip it. continue diff --git a/pkg/util/ranger/ranger.go b/pkg/util/ranger/ranger.go index 10fa1cc70f6ae..efe801ef553cb 100644 --- a/pkg/util/ranger/ranger.go +++ b/pkg/util/ranger/ranger.go @@ -158,7 +158,7 @@ func convertPoint(sctx sessionctx.Context, point *point, tp *types.FieldType) (* case types.KindMaxValue, types.KindMinNotNull: return point, nil } - casted, err := point.value.ConvertTo(sc, tp) + casted, err := point.value.ConvertTo(sc.TypeCtx(), tp) if err != nil { if sctx.GetSessionVars().StmtCtx.InPreparedPlanBuilding { // skip plan cache in this case for safety. 
@@ -196,7 +196,7 @@ func convertPoint(sctx sessionctx.Context, point *point, tp *types.FieldType) (* } //revive:enable:empty-block } - valCmpCasted, err := point.value.Compare(sc, &casted, collate.GetCollator(tp.GetCollate())) + valCmpCasted, err := point.value.Compare(sc.TypeCtx(), &casted, collate.GetCollator(tp.GetCollate())) if err != nil { return point, errors.Trace(err) } @@ -772,7 +772,7 @@ func RangesToString(sc *stmtctx.StatementContext, rans Ranges, colNames []string // sanity check: only last column of the `Range` can be an interval if j < len(ran.LowVal)-1 { - cmp, err := ran.LowVal[j].Compare(sc, &ran.HighVal[j], ran.Collators[j]) + cmp, err := ran.LowVal[j].Compare(sc.TypeCtx(), &ran.HighVal[j], ran.Collators[j]) if err != nil { return "", errors.New("comparing values error: " + err.Error()) } @@ -829,7 +829,7 @@ func RangeSingleColToString(sc *stmtctx.StatementContext, lowVal, highVal types. restoreCtx := format.NewRestoreCtx(format.DefaultRestoreFlags, &buf) // case 2: low value and high value are the same, and low value and high value are both inclusive. - cmp, err := lowVal.Compare(sc, &highVal, collator) + cmp, err := lowVal.Compare(sc.TypeCtx(), &highVal, collator) if err != nil { return "false", errors.Trace(err) } diff --git a/pkg/util/ranger/ranger_test.go b/pkg/util/ranger/ranger_test.go index 71a4552ae491c..2453cbe9263ed 100644 --- a/pkg/util/ranger/ranger_test.go +++ b/pkg/util/ranger/ranger_test.go @@ -265,7 +265,7 @@ func TestTableRange(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) @@ -454,7 +454,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -815,7 +815,7 @@ func TestColumnRange(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) ds, ok := sel.Children()[0].(*plannercore.DataSource) @@ -972,7 +972,7 @@ func TestIndexRangeForYear(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], 
ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1040,7 +1040,7 @@ func TestPrefixIndexRangeScan(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1387,7 +1387,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() @@ -1628,7 +1628,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) conds := make([]expression.Expression, len(selection.Conditions)) @@ -1656,7 +1656,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection, ok := p.(*plannercore.Update).SelectPlan.(*plannercore.PhysicalSelection) require.True(t, ok) @@ -1674,7 +1674,7 @@ func TestTableShardIndex(t *testing.T) { ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection, ok := p.(*plannercore.Delete).SelectPlan.(*plannercore.PhysicalSelection) require.True(t, ok) @@ -1819,7 +1819,7 @@ func getSelectionFromQuery(t *testing.T, sctx sessionctx.Context, sql string) *p ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err) selection, isSelection := 
p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) require.True(t, isSelection) @@ -2255,7 +2255,7 @@ create table t( ret := &plannercore.PreprocessorReturn{} err = plannercore.Preprocess(context.Background(), sctx, stmts[0], plannercore.WithPreprocessorReturn(ret)) require.NoError(t, err, fmt.Sprintf("error %v, for resolve name, expr %s", err, tt.exprStr)) - p, _, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) + p, err := plannercore.BuildLogicalPlanForTest(ctx, sctx, stmts[0], ret.InfoSchema) require.NoError(t, err, fmt.Sprintf("error %v, for build plan, expr %s", err, tt.exprStr)) selection := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) tbl := selection.Children()[0].(*plannercore.DataSource).TableInfo() diff --git a/pkg/util/ranger/types.go b/pkg/util/ranger/types.go index c3c8cbaa9ad02..f2b4c22787c3e 100644 --- a/pkg/util/ranger/types.go +++ b/pkg/util/ranger/types.go @@ -109,7 +109,7 @@ func (ran *Range) isPoint(stmtCtx *stmtctx.StatementContext, regardNullAsPoint b if a.Kind() == types.KindMinNotNull || b.Kind() == types.KindMaxValue { return false } - cmp, err := a.Compare(stmtCtx, &b, ran.Collators[i]) + cmp, err := a.Compare(stmtCtx.TypeCtx(), &b, ran.Collators[i]) if err != nil { return false } @@ -217,7 +217,7 @@ func (ran *Range) Encode(sc *stmtctx.StatementContext, lowBuffer, highBuffer []b func (ran *Range) PrefixEqualLen(sc *stmtctx.StatementContext) (int, error) { // Here, len(ran.LowVal) always equal to len(ran.HighVal) for i := 0; i < len(ran.LowVal); i++ { - cmp, err := ran.LowVal[i].Compare(sc, &ran.HighVal[i], ran.Collators[i]) + cmp, err := ran.LowVal[i].Compare(sc.TypeCtx(), &ran.HighVal[i], ran.Collators[i]) if err != nil { return 0, errors.Trace(err) } diff --git a/pkg/util/rowDecoder/decoder_test.go b/pkg/util/rowDecoder/decoder_test.go index fb77d027d4592..78eb540bedbf5 100644 --- a/pkg/util/rowDecoder/decoder_test.go +++ b/pkg/util/rowDecoder/decoder_test.go @@ -77,11 +77,11 @@ func TestRowDecoder(t *testing.T) { Duration: time.Hour + time.Second, }) - time2, err := time1.Add(sc, d1.GetMysqlDuration()) + time2, err := time1.Add(sc.TypeCtx(), d1.GetMysqlDuration()) require.Nil(t, err) t2 := types.NewTimeDatum(time2) - time3, err := time1.Add(sc, types.Duration{Duration: time.Hour*2 + time.Second*2}) + time3, err := time1.Add(sc.TypeCtx(), types.Duration{Duration: time.Hour*2 + time.Second*2}) require.Nil(t, err) t3 := types.NewTimeDatum(time3) @@ -125,7 +125,7 @@ func TestRowDecoder(t *testing.T) { for i, col := range cols[:len(cols)-1] { v, ok := r[col.ID] if ok { - equal, err1 := v.Compare(sc, &row.output[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row.output[i], collate.GetBinaryCollator()) require.Nil(t, err1) require.Equal(t, 0, equal) } else { @@ -139,7 +139,7 @@ func TestRowDecoder(t *testing.T) { for k, v := range r2 { v1, ok := r[k] require.True(t, ok) - equal, err1 := v.Compare(sc, &v1, collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &v1, collate.GetBinaryCollator()) require.Nil(t, err1) require.Equal(t, 0, equal) } @@ -197,7 +197,7 @@ func TestClusterIndexRowDecoder(t *testing.T) { for i, col := range cols { v, ok := r[col.ID] require.True(t, ok) - equal, err1 := v.Compare(sc, &row.output[i], collate.GetBinaryCollator()) + equal, err1 := v.Compare(sc.TypeCtx(), &row.output[i], collate.GetBinaryCollator()) require.Nil(t, err1) require.Equal(t, 0, equal) } diff --git a/pkg/util/rowcodec/common.go 
b/pkg/util/rowcodec/common.go index eb399b07ca088..ba097152e7bcb 100644 --- a/pkg/util/rowcodec/common.go +++ b/pkg/util/rowcodec/common.go @@ -338,7 +338,7 @@ func appendDatumForChecksum(buf []byte, dat *data.Datum, typ byte) (out []byte, out = binary.LittleEndian.AppendUint64(buf, dat.GetMysqlSet().Value) case mysql.TypeBit: // ticdc transforms a bit value as the following way, no need to handle truncate error here. - v, _ := dat.GetBinaryLiteral().ToInt(data.DefaultNoWarningContext) + v, _ := dat.GetBinaryLiteral().ToInt(data.DefaultStmtNoWarningContext) out = binary.LittleEndian.AppendUint64(buf, v) case mysql.TypeJSON: out = appendLengthValue(buf, []byte(dat.GetMysqlJSON().String())) diff --git a/pkg/util/rowcodec/rowcodec_test.go b/pkg/util/rowcodec/rowcodec_test.go index 64844fed25e78..e3c6086cdc12d 100644 --- a/pkg/util/rowcodec/rowcodec_test.go +++ b/pkg/util/rowcodec/rowcodec_test.go @@ -292,7 +292,7 @@ func TestTypesNewRowCodec(t *testing.T) { return d } getTime := func(value string) types.Time { - d, err := types.ParseTime(stmtctx.NewStmtCtxWithTimeZone(time.UTC), value, mysql.TypeTimestamp, 6, nil) + d, err := types.ParseTime(types.DefaultStmtNoWarningContext, value, mysql.TypeTimestamp, 6, nil) require.NoError(t, err) return d } @@ -1306,7 +1306,7 @@ var ( } } getDuration = func(value string) types.Duration { - dur, _, _ := types.ParseDuration(nil, value, 0) + dur, _, _ := types.ParseDuration(types.DefaultStmtNoWarningContext, value, 0) return dur } getOldDatumByte = func(d types.Datum) []byte { diff --git a/pkg/util/servermemorylimit/BUILD.bazel b/pkg/util/servermemorylimit/BUILD.bazel index 5c81b4743a315..68302e99aeefe 100644 --- a/pkg/util/servermemorylimit/BUILD.bazel +++ b/pkg/util/servermemorylimit/BUILD.bazel @@ -11,6 +11,7 @@ go_library( "//pkg/util", "//pkg/util/logutil", "//pkg/util/memory", + "//pkg/util/sqlkiller", "@com_github_pingcap_failpoint//:failpoint", "@org_uber_go_atomic//:atomic", "@org_uber_go_zap//:zap", diff --git a/pkg/util/servermemorylimit/servermemorylimit.go b/pkg/util/servermemorylimit/servermemorylimit.go index 8c5c12ff3700d..8b80598c559f6 100644 --- a/pkg/util/servermemorylimit/servermemorylimit.go +++ b/pkg/util/servermemorylimit/servermemorylimit.go @@ -27,6 +27,7 @@ import ( "github.com/pingcap/tidb/pkg/util" "github.com/pingcap/tidb/pkg/util/logutil" "github.com/pingcap/tidb/pkg/util/memory" + "github.com/pingcap/tidb/pkg/util/sqlkiller" atomicutil "go.uber.org/atomic" "go.uber.org/zap" ) @@ -157,7 +158,7 @@ func killSessIfNeeded(s *sessionToBeKilled, bt uint64, sm util.SessionManager) { s.sqlStartTime = info.Time s.isKilling = true s.sessionTracker = t - t.NeedKill.Store(true) + t.Killer.SendKillSignal(sqlkiller.ServerMemoryExceeded) killTime := time.Now() SessionKillTotal.Add(1) diff --git a/pkg/types/context/BUILD.bazel b/pkg/util/sqlescape/BUILD.bazel similarity index 50% rename from pkg/types/context/BUILD.bazel rename to pkg/util/sqlescape/BUILD.bazel index 5d9a1d17e6e49..73684e5650b50 100644 --- a/pkg/types/context/BUILD.bazel +++ b/pkg/util/sqlescape/BUILD.bazel @@ -1,25 +1,21 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( - name = "context", - srcs = [ - "context.go", - "truncate.go", - ], - importpath = "github.com/pingcap/tidb/pkg/types/context", + name = "sqlescape", + srcs = ["utils.go"], + importpath = "github.com/pingcap/tidb/pkg/util/sqlescape", visibility = ["//visibility:public"], deps = [ - "//pkg/errno", - "//pkg/util/intest", + "//pkg/util/hack", "@com_github_pingcap_errors//:errors", ], 
) go_test( - name = "context_test", + name = "sqlescape_test", timeout = "short", - srcs = ["context_test.go"], - embed = [":context"], + srcs = ["utils_test.go"], + embed = [":sqlescape"], flaky = True, deps = ["@com_github_stretchr_testify//require"], ) diff --git a/pkg/util/sqlexec/utils.go b/pkg/util/sqlescape/utils.go similarity index 99% rename from pkg/util/sqlexec/utils.go rename to pkg/util/sqlescape/utils.go index 877da4b94af6f..0a6d56b98e700 100644 --- a/pkg/util/sqlexec/utils.go +++ b/pkg/util/sqlescape/utils.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package sqlexec +package sqlescape import ( "encoding/json" diff --git a/pkg/util/sqlexec/utils_test.go b/pkg/util/sqlescape/utils_test.go similarity index 99% rename from pkg/util/sqlexec/utils_test.go rename to pkg/util/sqlescape/utils_test.go index 23f8119409a50..a385d34b72c98 100644 --- a/pkg/util/sqlexec/utils_test.go +++ b/pkg/util/sqlescape/utils_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package sqlexec +package sqlescape import ( "encoding/json" diff --git a/pkg/util/sqlexec/BUILD.bazel b/pkg/util/sqlexec/BUILD.bazel index 613d6dc7758e6..10399c42e69a3 100644 --- a/pkg/util/sqlexec/BUILD.bazel +++ b/pkg/util/sqlexec/BUILD.bazel @@ -5,7 +5,6 @@ go_library( srcs = [ "restricted_sql_executor.go", "simple_record_set.go", - "utils.go", ], importpath = "github.com/pingcap/tidb/pkg/util/sqlexec", visibility = ["//visibility:public"], @@ -16,8 +15,6 @@ go_library( "//pkg/sessionctx/variable", "//pkg/types", "//pkg/util/chunk", - "//pkg/util/hack", - "@com_github_pingcap_errors//:errors", "@com_github_pingcap_kvproto//pkg/kvrpcpb", ], ) @@ -25,15 +22,11 @@ go_library( go_test( name = "sqlexec_test", timeout = "short", - srcs = [ - "main_test.go", - "utils_test.go", - ], + srcs = ["main_test.go"], embed = [":sqlexec"], flaky = True, deps = [ "//pkg/testkit/testsetup", - "@com_github_stretchr_testify//require", "@org_uber_go_goleak//:goleak", ], ) diff --git a/pkg/util/sqlkiller/BUILD.bazel b/pkg/util/sqlkiller/BUILD.bazel new file mode 100644 index 0000000000000..69ab87a74a96b --- /dev/null +++ b/pkg/util/sqlkiller/BUILD.bazel @@ -0,0 +1,13 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "sqlkiller", + srcs = ["sqlkiller.go"], + importpath = "github.com/pingcap/tidb/pkg/util/sqlkiller", + visibility = ["//visibility:public"], + deps = [ + "//pkg/util/dbterror/exeerrors", + "//pkg/util/logutil", + "@org_uber_go_zap//:zap", + ], +) diff --git a/pkg/util/sqlkiller/sqlkiller.go b/pkg/util/sqlkiller/sqlkiller.go new file mode 100644 index 0000000000000..88d5e90f9c320 --- /dev/null +++ b/pkg/util/sqlkiller/sqlkiller.go @@ -0,0 +1,68 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package sqlkiller + +import ( + "sync/atomic" + + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" + "github.com/pingcap/tidb/pkg/util/logutil" + "go.uber.org/zap" +) + +type killSignal = uint32 + +// KillSignal types. +const ( + UnspecifiedKillSignal killSignal = iota + QueryInterrupted + MaxExecTimeExceeded + QueryMemoryExceeded + ServerMemoryExceeded +) + +// SQLKiller is used to kill a query. +type SQLKiller struct { + Signal killSignal + ConnID uint64 +} + +// SendKillSignal sends a kill signal to the query. +func (killer *SQLKiller) SendKillSignal(reason killSignal) { + atomic.CompareAndSwapUint32(&killer.Signal, 0, reason) +} + +// HandleSignal handles the kill signal and returns the error. +func (killer *SQLKiller) HandleSignal() error { + status := atomic.LoadUint32(&killer.Signal) + switch status { + case QueryInterrupted: + return exeerrors.ErrQueryInterrupted.GenWithStackByArgs() + case MaxExecTimeExceeded: + return exeerrors.ErrMaxExecTimeExceeded.GenWithStackByArgs() + case QueryMemoryExceeded: + return exeerrors.ErrMemoryExceedForQuery.GenWithStackByArgs(killer.ConnID) + case ServerMemoryExceeded: + logutil.BgLogger().Warn("global memory controller, NeedKill signal is received successfully", + zap.Uint64("conn", killer.ConnID)) + return exeerrors.ErrMemoryExceedForInstance.GenWithStackByArgs(killer.ConnID) + } + return nil +} + +// Reset resets the SQLKiller. +func (killer *SQLKiller) Reset() { + atomic.StoreUint32(&killer.Signal, 0) +} diff --git a/pkg/util/timeutil/time_zone.go b/pkg/util/timeutil/time_zone.go index 173810695414c..2c0f4f95a3d0e 100644 --- a/pkg/util/timeutil/time_zone.go +++ b/pkg/util/timeutil/time_zone.go @@ -237,7 +237,7 @@ func ParseTimeZone(s string) (*time.Location, error) { // The value can be given as a string indicating an offset from UTC, such as '+10:00' or '-6:00'. // The time zone's value should in [-12:59,+14:00]. if strings.HasPrefix(s, "+") || strings.HasPrefix(s, "-") { - d, _, err := types.ParseDuration(nil, s[1:], 0) + d, _, err := types.ParseDuration(types.DefaultStmtNoWarningContext, s[1:], 0) if err == nil { if s[0] == '-' { if d.Duration > 12*time.Hour+59*time.Minute { diff --git a/pkg/util/util.go b/pkg/util/util.go index eb2f9d5e584dd..ced69cfbd0bb7 100644 --- a/pkg/util/util.go +++ b/pkg/util/util.go @@ -296,3 +296,11 @@ func IsInCorrectIdentifierName(name string) bool { } return false } + +// GetRecoverError gets the error from recover. +func GetRecoverError(r interface{}) error { + if err, ok := r.(error); ok { + return err + } + return errors.Errorf("%v", r) +} diff --git a/tests/globalkilltest/Dockerfile b/tests/globalkilltest/Dockerfile index 868f5888656ca..0d90ea007145f 100644 --- a/tests/globalkilltest/Dockerfile +++ b/tests/globalkilltest/Dockerfile @@ -32,8 +32,8 @@ WORKDIR /go/src/github.com/pingcap/tidb COPY go.mod . COPY go.sum .
-COPY parser/go.mod parser/go.mod -COPY parser/go.sum parser/go.sum +COPY pkg/parser/go.mod pkg/parser/go.mod +COPY pkg/parser/go.sum pkg/parser/go.sum ARG GOPROXY RUN GO111MODULE=on GOPROXY=${GOPROXY} go mod download diff --git a/tests/globalkilltest/Makefile b/tests/globalkilltest/Makefile index 1b5de94e9d40d..657f4fabe1099 100644 --- a/tests/globalkilltest/Makefile +++ b/tests/globalkilltest/Makefile @@ -17,16 +17,16 @@ OUT_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))/bin) include $(BASE_DIR)/Makefile.common -GLOBAL_KILL_TEST_SERVER_LDFLAGS = -X "github.com/pingcap/tidb/domain.ldflagIsGlobalKillTest=1" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/domain.ldflagServerIDTTL=10" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/domain.ldflagServerIDTimeToKeepAlive=1" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/domain.ldflagServerIDTimeToCheckPDConnectionRestored=1" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/domain.ldflagLostConnectionToPDTimeout=5" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/store.ldflagGetEtcdAddrsFromConfig=1" - -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/util/globalconn.ldflagIsGlobalKillTest=1" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/util/globalconn.ldflagServerIDBits32=2" -GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/util/globalconn.ldflagLocalConnIDBits32=4" +GLOBAL_KILL_TEST_SERVER_LDFLAGS = -X "github.com/pingcap/tidb/pkg/domain.ldflagIsGlobalKillTest=1" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/domain.ldflagServerIDTTL=10" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/domain.ldflagServerIDTimeToKeepAlive=1" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/domain.ldflagServerIDTimeToCheckPDConnectionRestored=1" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/domain.ldflagLostConnectionToPDTimeout=5" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/store.ldflagGetEtcdAddrsFromConfig=1" + +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/util/globalconn.ldflagIsGlobalKillTest=1" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/util/globalconn.ldflagServerIDBits32=2" +GLOBAL_KILL_TEST_SERVER_LDFLAGS += -X "github.com/pingcap/tidb/pkg/util/globalconn.ldflagLocalConnIDBits32=4" .PHONY: server buildsucc diff --git a/tests/integrationtest/r/ddl/db_integration.result b/tests/integrationtest/r/ddl/db_integration.result index 515bb8774d5a6..cf6f13e14cdc3 100644 --- a/tests/integrationtest/r/ddl/db_integration.result +++ b/tests/integrationtest/r/ddl/db_integration.result @@ -139,7 +139,7 @@ Error 1166 (42000): Incorrect column name '_tidb_rowid' create table aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa(a int); Error 1059 (42000): Identifier name 'aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa' is too long create table test_error_code1 (c1 int, c2 int, key aa (c1, c2), key aa (c1)); -Error 1061 (42000): duplicate key name aa +Error 1061 (42000): Duplicate key name 'aa' create table test_error_code1 (c1 int, c2 int, c3 int, key(c_not_exist)); Error 1072 (42000): column does not exist: c_not_exist create table test_error_code1 (c1 int, c2 int, c3 int, primary key(c_not_exist)); @@ -240,7 +240,7 @@ alter table test_error_code_succ add index idx (c_not_exist); Error 1072 (42000): column does not exist: c_not_exist alter table 
test_error_code_succ add index idx (c1); alter table test_error_code_succ add index idx (c1); -Error 1061 (42000): index already exist idx +Error 1061 (42000): Duplicate key name 'idx' alter table test_error_code_succ drop index idx_not_exist; Error 1091 (42000): index idx_not_exist doesn't exist alter table test_error_code_succ drop column c3; diff --git a/tests/integrationtest/r/ddl/db_partition.result b/tests/integrationtest/r/ddl/db_partition.result index 9d9523b794bdc..3e31818463eb0 100644 --- a/tests/integrationtest/r/ddl/db_partition.result +++ b/tests/integrationtest/r/ddl/db_partition.result @@ -1260,6 +1260,9 @@ PARTITION pfuture VALUES LESS THAN (MAXVALUE)); alter table t1p exchange partition p202307 with table t1; Error 1736 (HY000): Tables have different definitions insert into t1 values ("2023-08-06","0000"); +drop table t1, t1p; +drop placement policy rule1; +drop placement policy rule2; drop table if exists pt; create table pt (a int primary key auto_increment) partition by range(a) ( partition p0 values less than (3), diff --git a/tests/integrationtest/r/ddl/multi_schema_change.result b/tests/integrationtest/r/ddl/multi_schema_change.result index d76c1638e6fc4..c1668d475e442 100644 --- a/tests/integrationtest/r/ddl/multi_schema_change.result +++ b/tests/integrationtest/r/ddl/multi_schema_change.result @@ -217,7 +217,7 @@ Error 1176 (42000): Key 't' doesn't exist in table 't' drop table if exists t; create table t (a int, b int, c int, index t(a)); alter table t drop index t, add index t(b); -Error 1061 (42000): index already exist t +Error 1061 (42000): Duplicate key name 't' drop table if exists t; create table t (a int, b int, c int, index t(a)); alter table t add index t1(b), drop index t1; diff --git a/tests/integrationtest/r/executor/adapter.result b/tests/integrationtest/r/executor/adapter.result new file mode 100644 index 0000000000000..ea7d968aa8de4 --- /dev/null +++ b/tests/integrationtest/r/executor/adapter.result @@ -0,0 +1,34 @@ +set @a = now(6); +drop table if exists t; +create table t(a int); +insert into t values (1), (1), (1), (1), (1); +select * from t t1 join t t2 on t1.a = t2.a; +a a +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +select timestampdiff(microsecond, @a, now(6)) < 1000000; +timestampdiff(microsecond, @a, now(6)) < 1000000 +1 diff --git a/tests/integrationtest/r/executor/admin.result b/tests/integrationtest/r/executor/admin.result index 0f75f9bbee557..fc561ce64047a 100644 --- a/tests/integrationtest/r/executor/admin.result +++ b/tests/integrationtest/r/executor/admin.result @@ -81,3 +81,160 @@ create table t(a bigint unsigned primary key, b int, c int, index idx(a, b)); insert into t values(1, 1, 1), (9223372036854775807, 2, 2); admin check index t idx; +drop table if exists test_null; +CREATE TABLE test_null ( +a int(11) NOT NULL, +c int(11) NOT NULL, +PRIMARY KEY (a, c), +KEY idx_a (a) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin; +insert into test_null(a, c) values(2, 2); +ALTER TABLE test_null ADD COLUMN b int NULL DEFAULT '1795454803' AFTER a; +ALTER TABLE test_null add index b(b); +ADMIN CHECK TABLE test_null; + +drop table if exists test; +create table test ( +a time, +PRIMARY KEY (a) +); +insert into test set a='12:10:36'; +admin check table test; + +drop table if exists test; +CREATE TABLE test ( a decimal, PRIMARY KEY (a)); +insert into test set a=10; +admin check table test; + +drop table if exists test; +create table test ( a TIMESTAMP, 
primary key(a) ); +insert into test set a='2015-08-10 04:18:49'; +admin check table test; + +drop table if exists test; +create table test ( +a int not null, +c int not null, +primary key (a, c), +key idx_a (a)) partition by range (c) ( +partition p1 values less than (1), +partition p2 values less than (4), +partition p3 values less than (7), +partition p4 values less than (11)); +insert into test values (1, 1); +insert into test values (2, 2); +insert into test values (3, 3); +insert into test values (4, 4); +insert into test values (5, 5); +insert into test values (6, 6); +insert into test values (7, 7); +insert into test values (8, 8); +insert into test values (9, 9); +insert into test values (10, 10); +admin check table test; + +drop table if exists test; +create table test ( b json , c int as (JSON_EXTRACT(b,'$.d')), index idxc(c)); +INSERT INTO test set b='{"d": 100}'; +admin check table test; + +drop table if exists t; +CREATE TABLE t ( +ID CHAR(32) NOT NULL, +name CHAR(32) NOT NULL, +value CHAR(255), +INDEX indexIDname (ID(8),name(8)) +); +INSERT INTO t VALUES ('keyword','urlprefix','text/ /text'); +admin check table t; + +use mysql; +admin check table executor__admin.t; + +admin check table t; +Error 1146 (42S02): Table 'mysql.t' doesn't exist +use executor__admin; +drop table if exists t1; +CREATE TABLE t1 (c2 YEAR, PRIMARY KEY (c2)); +INSERT INTO t1 SET c2 = '1912'; +ALTER TABLE t1 ADD COLUMN c3 TIMESTAMP NULL DEFAULT '1976-08-29 16:28:11'; +ALTER TABLE t1 ADD COLUMN c4 DATE NULL DEFAULT '1976-08-29'; +ALTER TABLE t1 ADD COLUMN c5 TIME NULL DEFAULT '16:28:11'; +ALTER TABLE t1 ADD COLUMN c6 YEAR NULL DEFAULT '1976'; +ALTER TABLE t1 ADD INDEX idx1 (c2, c3,c4,c5,c6); +ALTER TABLE t1 ADD INDEX idx2 (c2); +ALTER TABLE t1 ADD INDEX idx3 (c3); +ALTER TABLE t1 ADD INDEX idx4 (c4); +ALTER TABLE t1 ADD INDEX idx5 (c5); +ALTER TABLE t1 ADD INDEX idx6 (c6); +admin check table t1; + +drop table if exists td1; +CREATE TABLE td1 (c2 INT NULL DEFAULT '70'); +INSERT INTO td1 SET c2 = '5'; +ALTER TABLE td1 ADD COLUMN c4 DECIMAL(12,8) NULL DEFAULT '213.41598062'; +ALTER TABLE td1 ADD INDEX id2 (c4) ; +ADMIN CHECK TABLE td1; + +drop table if exists t1; +create table t1 (a int); +insert into t1 set a=2; +alter table t1 add column b timestamp not null; +alter table t1 add index(b); +admin check table t1; + +drop table if exists t1; +create table t1 (a decimal(2,1), index(a)); +insert into t1 set a='1.9'; +alter table t1 modify column a decimal(3,2); +delete from t1; +admin check table t1; + +drop table if exists check_index_test; +create table check_index_test (a int, b varchar(10), index a_b (a, b), index b (b)); +insert check_index_test values (3, "ab"),(2, "cd"),(1, "ef"),(-1, "hi"); +admin check index check_index_test a_b (2, 4); +a b extra_handle +1 ef 3 +2 cd 2 +admin check index check_index_test a_b (3, 5); +a b extra_handle +-1 hi 4 +1 ef 3 +use mysql; +admin check index executor__admin.check_index_test a_b (2, 3), (4, 5); +a b extra_handle +-1 hi 4 +2 cd 2 +use executor__admin; +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL); +admin check index admin_test c1; + +admin check index admin_test c2; + +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) partition by hash(c2) partitions 5; +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 
11), (NULL, NULL); +admin check index admin_test c1; + +admin check index admin_test c2; + +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) PARTITION BY RANGE ( c2 ) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (10), +PARTITION p2 VALUES LESS THAN (MAXVALUE)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL); +admin check index admin_test c1; + +admin check index admin_test c2; + +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, primary key(c1), key(c2)); +set cte_max_recursion_depth=100000; +insert into admin_test with recursive cte(a, b) as (select 1, 1 union select a+1, b+1 from cte where cte.a< 100000) select * from cte; +select /*+ read_from_storage(tikv[`executor__admin`.`admin_test`]) */ bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024), count(*) from `executor__admin`.`admin_test` use index() where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024); +select bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024), count(*) from `executor__admin`.`admin_test` use index(`c2`) where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024); +set cte_max_recursion_depth=default; diff --git a/tests/integrationtest/r/executor/aggregate.result b/tests/integrationtest/r/executor/aggregate.result index f9847d92c5629..9cdc981eec010 100644 --- a/tests/integrationtest/r/executor/aggregate.result +++ b/tests/integrationtest/r/executor/aggregate.result @@ -1553,3 +1553,511 @@ a select a from t group by (-a); Error 1055 (42000): Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'executor__aggregate.t.a' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by set sql_mode = default; +drop table if exists test; +create table test(id int, name int); +insert into test values(1, 10); +insert into test values(1, 20); +insert into test values(1, 30); +insert into test values(2, 20); +insert into test values(3, 200); +insert into test values(3, 500); +select id, group_concat(name) from test group by id order by id; +id group_concat(name) +1 10,20,30 +2 20 +3 200,500 +select id, group_concat(name SEPARATOR ';') from test group by id order by id; +id group_concat(name SEPARATOR ';') +1 10;20;30 +2 20 +3 200;500 +select id, group_concat(name SEPARATOR ',') from test group by id order by id; +id group_concat(name SEPARATOR ',') +1 10,20,30 +2 20 +3 200,500 +select id, group_concat(name SEPARATOR '%') from test group by id order by id; +id group_concat(name SEPARATOR '%') +1 10%20%30 +2 20 +3 200%500 +select id, group_concat(name SEPARATOR '') from test group by id order by id; +id group_concat(name SEPARATOR '') +1 102030 +2 20 +3 200500 +select id, group_concat(name SEPARATOR '123') from test group by id order by id; +id group_concat(name SEPARATOR '123') +1 101232012330 +2 20 +3 200123500 +select group_concat(id ORDER BY name) from (select * from test order by id, name limit 2,2) t; +group_concat(id ORDER BY name) +2,1 +select group_concat(id ORDER BY name desc) from (select * from test order by id, name limit 2,2) t; +group_concat(id ORDER BY name desc) +1,2 +select 
group_concat(name ORDER BY id) from (select * from test order by id, name limit 2,2) t; +group_concat(name ORDER BY id) +30,20 +select group_concat(name ORDER BY id desc) from (select * from test order by id, name limit 2,2) t; +group_concat(name ORDER BY id desc) +20,30 +select group_concat(name ORDER BY name desc SEPARATOR '++') from test; +group_concat(name ORDER BY name desc SEPARATOR '++') +500++200++30++20++20++10 +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1--2--1 +select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++200++30++20++20++10 3--3--1--1--2--1 +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30,20,10 +set session group_concat_max_len=4; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=5; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=6; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=7; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=8; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=9; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=10; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=11; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=12; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=13; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1--2 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=14; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1--2- +select @@warning_count; +@@warning_count +1 +set 
session group_concat_max_len=15; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +3--3--1--1--2-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=4; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=5; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=6; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=7; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=8; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=9; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=10; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=11; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=12; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=13; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1--3 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=14; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1--3- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=15; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +group_concat(id ORDER BY name asc, id desc SEPARATOR '--') +1--2--1--1--3-- +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=4; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500, +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=5; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,2 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=6; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name 
order by name desc) +500,20 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=7; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=8; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200, +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=9; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,3 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=10; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=11; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30, +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=12; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30,2 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=13; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30,20 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=14; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30,20, +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=15; +select group_concat(distinct name order by name desc) from test; +group_concat(distinct name order by name desc) +500,200,30,20,1 +select @@warning_count; +@@warning_count +1 +set session group_concat_max_len=default; +drop table if exists test2; +create table test2(id varchar(20), name varchar(20)); +insert into test2 select * from test; +select group_concat(id ORDER BY name) from (select * from test2 order by id, name limit 2,2) t; +group_concat(id ORDER BY name) +2,1 +select group_concat(id ORDER BY name desc) from (select * from test2 order by id, name limit 2,2) t; +group_concat(id ORDER BY name desc) +1,2 +select group_concat(name ORDER BY id) from (select * from test2 order by id, name limit 2,2) t; +group_concat(name ORDER BY id) +30,20 +select group_concat(name ORDER BY id desc) from (select * from test2 order by id, name limit 2,2) t; +group_concat(name ORDER BY id desc) +20,30 +select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test2; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++30++200++20++20++10 3--1--3--1--2--1 +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 1 desc, id SEPARATOR '++') from test; +1 2 3 4 5 group_concat(name, id ORDER BY 1 desc, id SEPARATOR '++') +1 2 3 4 5 5003++2003++301++201++202++101 +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 2 desc, name SEPARATOR '++') from test; +1 2 3 4 5 group_concat(name, id ORDER BY 2 desc, name SEPARATOR '++') +1 2 3 4 5 2003++5003++202++101++201++301 +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 3 desc, name SEPARATOR '++') from test; +Error 1054 (42S22): Unknown column '3' in 'order clause' +prepare s1 
from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY floor(id/?) desc, name SEPARATOR '++') from test"; +set @a=2; +execute s1 using @a; +1 2 3 4 5 group_concat(name, id ORDER BY floor(id/?) desc, name SEPARATOR '++') +1 2 3 4 5 202++2003++5003++101++201++301 +prepare s1 from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY ? desc, name SEPARATOR '++') from test"; +set @a=2; +execute s1 using @a; +1 2 3 4 5 group_concat(name, id ORDER BY ? desc, name SEPARATOR '++') +1 2 3 4 5 2003++5003++202++101++201++301 +set @a=3; +execute s1 using @a; +Error 1054 (42S22): Unknown column '?' in 'order clause' +set @a=3.0; +execute s1 using @a; +1 2 3 4 5 group_concat(name, id ORDER BY ? desc, name SEPARATOR '++') +1 2 3 4 5 101++202++201++301++2003++5003 +drop table if exists ptest; +CREATE TABLE ptest (id int,name int) PARTITION BY RANGE ( id ) (PARTITION `p0` VALUES LESS THAN (2), PARTITION `p1` VALUES LESS THAN (11)); +insert into ptest select * from test; +set session tidb_opt_distinct_agg_push_down = 0; +set session tidb_opt_agg_push_down = 0; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++200++30++20++20++10 3--3--1--1--2--1 +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +group_concat(distinct name order by name desc) +500,200,30,20,10 +set session tidb_opt_distinct_agg_push_down = 0; +set session tidb_opt_agg_push_down = 1; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++200++30++20++20++10 3--3--1--1--2--1 +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +group_concat(distinct name order by name desc) +500,200,30,20,10 +set session tidb_opt_distinct_agg_push_down = 1; +set session tidb_opt_agg_push_down = 0; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++200++30++20++20++10 3--3--1--1--2--1 +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +group_concat(distinct name order by name desc) +500,200,30,20,10 +set session tidb_opt_distinct_agg_push_down = 1; +set session tidb_opt_agg_push_down = 1; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +group_concat(name ORDER BY name desc SEPARATOR '++') group_concat(id ORDER BY name desc, id asc SEPARATOR '--') +500++200++30++20++20++10 3--3--1--1--2--1 +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +group_concat(distinct name order by name desc) +500,200,30,20,10 +set session tidb_opt_distinct_agg_push_down = default; +set session tidb_opt_agg_push_down = default; +select group_concat(123, null); +group_concat(123, null) +NULL +drop table if exists t1; +create table t1(cid int, sname varchar(100)); +insert into t1 values(1, 'Bob'), (1, 'Alice'); +insert into t1 values(3, 'Ace'); +set @@group_concat_max_len=5; +select group_concat(sname 
order by sname) from t1 group by cid; +group_concat(sname order by sname) +Alice +Ace +drop table if exists t1; +create table t1(c1 varchar(10)); +insert into t1 values('0123456789'); +insert into t1 values('12345'); +set @@group_concat_max_len=8; +select group_concat(c1 order by c1) from t1 group by c1; +group_concat(c1 order by c1) +01234567 +12345 +set @@group_concat_max_len=default; +drop table if exists select_distinct_test; +create table select_distinct_test(id int not null default 1, name varchar(255), PRIMARY KEY(id)); +insert INTO select_distinct_test VALUES (1, "hello"); +insert into select_distinct_test values (2, "hello"); +begin; +select distinct name from select_distinct_test; +name +hello +commit; +drop table if exists t; +create table t (i int); +insert into t values (1), (1), (1),(2),(3),(2),(3),(2),(3); +explain format = 'brief' select * from t order by i + 1; +id estRows task access object operator info +Projection 10000.00 root executor__aggregate.t.i +└─Sort 10000.00 root Column#3 + └─Projection 10000.00 root executor__aggregate.t.i, plus(executor__aggregate.t.i, 1)->Column#3 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select * from t order by i + 1; +i +1 +1 +1 +2 +2 +2 +3 +3 +3 +explain format = 'brief' select * from t order by i + 1 limit 2; +id estRows task access object operator info +Projection 2.00 root executor__aggregate.t.i +└─TopN 2.00 root Column#3, offset:0, count:2 + └─Projection 2.00 root executor__aggregate.t.i, plus(executor__aggregate.t.i, 1)->Column#3 + └─TableReader 2.00 root data:TopN + └─TopN 2.00 cop[tikv] plus(executor__aggregate.t.i, 1), offset:0, count:2 + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select * from t order by i + 1 limit 2; +i +1 +1 +select i, i, i from t order by i + 1; +i i i +1 1 1 +1 1 1 +1 1 1 +2 2 2 +2 2 2 +2 2 2 +3 3 3 +3 3 3 +3 3 3 +insert into mysql.opt_rule_blacklist value("decorrelate"); +drop table if exists test; +create table test (a int); +insert into test value(1); +select /*+ hash_agg() */ sum(a), (select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt; +sum(a) (select NULL from test where tt.a = test.a limit 1) (select NULL from test where tt.a = test.a limit 1) (select NULL from test where tt.a = test.a limit 1) +1 NULL NULL NULL +explain format = 'brief' select /*+ hash_agg() */ sum(a), (select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt; +id estRows task access object operator info +Projection 1.00 root Column#9, Column#12, Column#15, Column#18 +└─Apply 1.00 root CARTESIAN left outer join + ├─Apply(Build) 1.00 root CARTESIAN left outer join + │ ├─Apply(Build) 1.00 root CARTESIAN left outer join + │ │ ├─HashAgg(Build) 1.00 root funcs:sum(Column#28)->Column#9, funcs:firstrow(Column#29)->executor__aggregate.test.a + │ │ │ └─Projection 10000.00 root cast(executor__aggregate.test.a, decimal(10,0) BINARY)->Column#28, executor__aggregate.test.a->Column#29 + │ │ │ └─TableReader 10000.00 root data:TableFullScan + │ │ │ └─TableFullScan 10000.00 cop[tikv] table:tt keep order:false, stats:pseudo + │ │ └─Projection(Probe) 1.00 root ->Column#12 + │ │ └─Limit 1.00 root offset:0, count:1 + │ │ └─TableReader 1.00 root data:Limit + │ │ └─Limit 1.00 cop[tikv] offset:0, count:1 + │ │ 
└─Selection 1.00 cop[tikv] eq(executor__aggregate.test.a, executor__aggregate.test.a) + │ │ └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo + │ └─Projection(Probe) 1.00 root ->Column#15 + │ └─Limit 1.00 root offset:0, count:1 + │ └─TableReader 1.00 root data:Limit + │ └─Limit 1.00 cop[tikv] offset:0, count:1 + │ └─Selection 1.00 cop[tikv] eq(executor__aggregate.test.a, executor__aggregate.test.a) + │ └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo + └─Projection(Probe) 1.00 root ->Column#18 + └─Limit 1.00 root offset:0, count:1 + └─TableReader 1.00 root data:Limit + └─Limit 1.00 cop[tikv] offset:0, count:1 + └─Selection 1.00 cop[tikv] eq(executor__aggregate.test.a, executor__aggregate.test.a) + └─TableFullScan 1000.00 cop[tikv] table:test keep order:false, stats:pseudo +delete from mysql.opt_rule_blacklist where name = "decorrelate"; +admin reload opt_rule_blacklist; + diff --git a/tests/integrationtest/r/executor/analyze.result b/tests/integrationtest/r/executor/analyze.result index e3a48d683dbbf..3762068660eb4 100644 --- a/tests/integrationtest/r/executor/analyze.result +++ b/tests/integrationtest/r/executor/analyze.result @@ -761,3 +761,99 @@ drop table if exists t; create table t(a int, b int, primary key(a), index idx(b)) partition by range(a) (partition p0 values less than (10), partition p1 values less than (20)); analyze incremental table t partition p0 index idx; Error 1105 (HY000): the incremental analyze feature has already been removed in TiDB v7.5.0, so this will have no effect +drop table if exists t; +set tidb_enable_clustered_index=on; +create table t (a int, b int, c int, primary key(a, b)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +create table t (a varchar(255), b int, c float, primary key(c, a)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +create table t (a char(10), b decimal(5, 3), c int, primary key(a, c, b)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +set tidb_enable_clustered_index=default; +set @@tidb_analyze_version = 1; +drop table if exists t0; +CREATE TABLE t0(c0 INT PRIMARY KEY); +ANALYZE TABLE t0 INDEX PRIMARY; +drop table if exists t0; +CREATE TABLE t0(c0 INT, c1 INT, PRIMARY KEY(c0, c1)); +INSERT INTO t0 VALUES (0, 0); +ANALYZE TABLE t0; +drop table if exists t0; +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (0); +CREATE INDEX i0 ON t0(c0); +ANALYZE TABLE t0 INDEX i0; +set @@tidb_analyze_version = default; +drop table if exists t; +create table t(a int, b int, c json, d text, e mediumtext, f blob, g mediumblob, index idx(d(10))); +set 
@@session.tidb_analyze_skip_column_types = 'json,blob,mediumblob,text,mediumtext'; +delete from mysql.analyze_jobs; +analyze table t; +select job_info from mysql.analyze_jobs where job_info like '%analyze table%'; +job_info +analyze table columns a, b, d with 256 buckets, 500 topn, 1 samplerate +delete from mysql.analyze_jobs; +analyze table t columns a, e; +select job_info from mysql.analyze_jobs where job_info like '%analyze table%'; +job_info +analyze table columns a, d with 256 buckets, 500 topn, 1 samplerate +set @@session.tidb_analyze_skip_column_types = default; +DROP TABLE IF EXISTS Issue34228; +CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01")); +INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02'); +SET @@global.tidb_analyze_version = 1; +SET @@session.tidb_partition_prune_mode = 'static'; +ANALYZE TABLE Issue34228; +SET @@session.tidb_partition_prune_mode = 'dynamic'; +ANALYZE TABLE Issue34228; +SELECT * FROM Issue34228; +id dt +1 2022-02-01 00:00:02 +2 2022-02-01 00:00:02 +DROP TABLE IF EXISTS Issue34228; +CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01")); +INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02'); +SET @@global.tidb_analyze_version = 1; +SET @@session.tidb_partition_prune_mode = 'static'; +ANALYZE TABLE Issue34228; +SET @@session.tidb_partition_prune_mode = 'dynamic'; +ANALYZE TABLE Issue34228; +SELECT * FROM Issue34228; +id dt +1 2022-02-01 00:00:02 +2 2022-02-01 00:00:02 +SET @@global.tidb_analyze_version = default; +SET @@session.tidb_partition_prune_mode = default; diff --git a/tests/integrationtest/r/executor/autoid.result b/tests/integrationtest/r/executor/autoid.result new file mode 100644 index 0000000000000..3c617c0a345eb --- /dev/null +++ b/tests/integrationtest/r/executor/autoid.result @@ -0,0 +1,739 @@ +drop table if exists t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; +create table t0 (a tinyint signed key auto_increment) ; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +insert into t0 values (); +Error 1690 (22003): constant 128 overflows tinyint +select * from t0 order by a; +a +-128 +1 +2 +3 +127 +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) ; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +insert into t1 values (); +Error 1690 (22003): constant 256 overflows tinyint +select * from t1 order by a; +a +1 +2 +127 +128 +255 +drop table t1; +create table t2 (a smallint signed key auto_increment) ; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +insert into t2 values (); +Error 1690 (22003): constant 32768 overflows smallint +select * from t2 order by a; +a +-32768 +1 +2 +3 +32767 +drop table t2; +create table t3 (a smallint unsigned key auto_increment) ; +insert into t3 values (0); +insert into t3 values (); +insert into t3 values (32767); +insert into t3 values (); +insert into t3 values (65535); +insert into t3 values (); +Error 1690 (22003): constant 65536 overflows smallint +select * 
from t3 order by a; +a +1 +2 +32767 +32768 +65535 +drop table t3; +create table t4 (a mediumint signed key auto_increment) ; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +insert into t4 values (); +Error 1690 (22003): constant 8388608 overflows mediumint +select * from t4 order by a; +a +-8388608 +1 +2 +3 +8388607 +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) ; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +insert into t5 values (); +Error 1690 (22003): constant 16777216 overflows mediumint +select * from t5 order by a; +a +1 +2 +8388607 +8388608 +16777215 +drop table t5; +create table t6 (a integer signed key auto_increment) ; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +insert into t6 values (); +Error 1690 (22003): constant 2147483648 overflows int +select * from t6 order by a; +a +-2147483648 +1 +2 +3 +2147483647 +drop table t6; +create table t7 (a integer unsigned key auto_increment) ; +insert into t7 values (0); +insert into t7 values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +insert into t7 values (); +Error 1690 (22003): constant 4294967296 overflows int +select * from t7 order by a; +a +1 +2 +2147483647 +2147483648 +4294967295 +drop table t7; +create table t8 (a bigint signed key auto_increment) ; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +insert into t8 values (); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +select * from t8 order by a; +a +-9223372036854775808 +1 +2 +3 +9223372036854775807 +drop table t8; +create table t9 (a bigint unsigned key auto_increment) ; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +a +1 +2 +9223372036854775807 +9223372036854775808 +drop table t9; +create table t0 (a tinyint signed key auto_increment) auto_id_cache 1; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +insert into t0 values (); +Error 1690 (22003): constant 128 overflows tinyint +select * from t0 order by a; +a +-128 +1 +2 +3 +127 +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) auto_id_cache 1; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +insert into t1 values (); +Error 1690 (22003): constant 256 overflows tinyint +select * from t1 order by a; +a +1 +2 +127 +128 +255 +drop table t1; +create table t2 (a smallint signed key auto_increment) auto_id_cache 1; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +insert into t2 values (); +Error 1690 (22003): constant 32768 overflows smallint +select * from t2 order by a; +a +-32768 +1 +2 +3 +32767 +drop table t2; +create table t3 (a smallint unsigned key auto_increment) auto_id_cache 1; +insert into t3 values (0); +insert into t3 values (); +insert into t3 
values (32767); +insert into t3 values (); +insert into t3 values (65535); +insert into t3 values (); +Error 1690 (22003): constant 65536 overflows smallint +select * from t3 order by a; +a +1 +2 +32767 +32768 +65535 +drop table t3; +create table t4 (a mediumint signed key auto_increment) auto_id_cache 1; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +insert into t4 values (); +Error 1690 (22003): constant 8388608 overflows mediumint +select * from t4 order by a; +a +-8388608 +1 +2 +3 +8388607 +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) auto_id_cache 1; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +insert into t5 values (); +Error 1690 (22003): constant 16777216 overflows mediumint +select * from t5 order by a; +a +1 +2 +8388607 +8388608 +16777215 +drop table t5; +create table t6 (a integer signed key auto_increment) auto_id_cache 1; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +insert into t6 values (); +Error 1690 (22003): constant 2147483648 overflows int +select * from t6 order by a; +a +-2147483648 +1 +2 +3 +2147483647 +drop table t6; +create table t7 (a integer unsigned key auto_increment) auto_id_cache 1; +insert into t7 values (0); +insert into t7 values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +insert into t7 values (); +Error 1690 (22003): constant 4294967296 overflows int +select * from t7 order by a; +a +1 +2 +2147483647 +2147483648 +4294967295 +drop table t7; +create table t8 (a bigint signed key auto_increment) auto_id_cache 1; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +insert into t8 values (); +Error 1105 (HY000): auto increment action failed +select * from t8 order by a; +a +-9223372036854775808 +1 +2 +3 +9223372036854775807 +drop table t8; +create table t9 (a bigint unsigned key auto_increment) auto_id_cache 1; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +a +1 +2 +9223372036854775807 +9223372036854775808 +drop table t9; +create table t0 (a tinyint signed key auto_increment) auto_id_cache 100; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +insert into t0 values (); +Error 1690 (22003): constant 128 overflows tinyint +select * from t0 order by a; +a +-128 +1 +2 +3 +127 +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) auto_id_cache 100; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +insert into t1 values (); +Error 1690 (22003): constant 256 overflows tinyint +select * from t1 order by a; +a +1 +2 +127 +128 +255 +drop table t1; +create table t2 (a smallint signed key auto_increment) auto_id_cache 100; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +insert into t2 values (); +Error 1690 (22003): constant 32768 
overflows smallint +select * from t2 order by a; +a +-32768 +1 +2 +3 +32767 +drop table t2; +create table t3 (a smallint unsigned key auto_increment) auto_id_cache 100; +insert into t3 values (0); +insert into t3 values (); +insert into t3 values (32767); +insert into t3 values (); +insert into t3 values (65535); +insert into t3 values (); +Error 1690 (22003): constant 65536 overflows smallint +select * from t3 order by a; +a +1 +2 +32767 +32768 +65535 +drop table t3; +create table t4 (a mediumint signed key auto_increment) auto_id_cache 100; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +insert into t4 values (); +Error 1690 (22003): constant 8388608 overflows mediumint +select * from t4 order by a; +a +-8388608 +1 +2 +3 +8388607 +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) auto_id_cache 100; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +insert into t5 values (); +Error 1690 (22003): constant 16777216 overflows mediumint +select * from t5 order by a; +a +1 +2 +8388607 +8388608 +16777215 +drop table t5; +create table t6 (a integer signed key auto_increment) auto_id_cache 100; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +insert into t6 values (); +Error 1690 (22003): constant 2147483648 overflows int +select * from t6 order by a; +a +-2147483648 +1 +2 +3 +2147483647 +drop table t6; +create table t7 (a integer unsigned key auto_increment) auto_id_cache 100; +insert into t7 values (0); +insert into t7 values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +insert into t7 values (); +Error 1690 (22003): constant 4294967296 overflows int +select * from t7 order by a; +a +1 +2 +2147483647 +2147483648 +4294967295 +drop table t7; +create table t8 (a bigint signed key auto_increment) auto_id_cache 100; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +insert into t8 values (); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +select * from t8 order by a; +a +-9223372036854775808 +1 +2 +3 +9223372036854775807 +drop table t8; +create table t9 (a bigint unsigned key auto_increment) auto_id_cache 100; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +a +1 +2 +9223372036854775807 +9223372036854775808 +drop table t9; +create table t10 (a integer key auto_increment) auto_id_cache 1; +insert into t10 values (2147483648); +Error 1264 (22003): Out of range value for column 'a' at row 1 +insert into t10 values (-2147483649); +Error 1264 (22003): Out of range value for column 'a' at row 1 +drop table if exists t1, t2, t3, t11, t22, t33; +create table t1 (id int key auto_increment); +insert into t1 values (); +rename table t1 to t11; +insert into t11 values (); +select * from t11; +id +1 +30001 +create table t2 (id int key auto_increment) auto_id_cache 1; +insert into t2 values (); +rename table t2 to t22; +insert into t22 values (); +select * from t22; +id +1 +2 +create table t3 (id int key auto_increment) auto_id_cache 100; +insert into t3 values 
(); +rename table t3 to t33; +insert into t33 values (); +select * from t33; +id +1 +101 +drop table if exists t0; +create table t0 (id int auto_increment,k int,c char(120)) ; +drop table if exists t1; +create table t1 (id int auto_increment,k int,c char(120)) engine = MyISAM; +drop table if exists t2; +create table t2 (id int auto_increment,k int,c char(120)) engine = InnoDB; +drop table if exists t3; +create table t3 (id int auto_increment,k int,c char(120)) auto_id_cache 1; +drop table if exists t4; +create table t4 (id int auto_increment,k int,c char(120)) auto_id_cache 100; +drop table if exists t5; +create table t5 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) ; +drop table if exists t6; +create table t6 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) engine = MyISAM; +drop table if exists t7; +create table t7 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) engine = InnoDB; +drop table if exists t8; +create table t8 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) auto_id_cache 1; +drop table if exists t9; +create table t9 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) auto_id_cache 100; +drop table if exists t10; +create table t10 (id int auto_increment,k int,c char(120),key idx_1(id)) ; +drop table if exists t11; +create table t11 (id int auto_increment,k int,c char(120),key idx_1(id)) engine = MyISAM; +drop table if exists t12; +create table t12 (id int auto_increment,k int,c char(120),key idx_1(id)) engine = InnoDB; +drop table if exists t13; +create table t13 (id int auto_increment,k int,c char(120),key idx_1(id)) auto_id_cache 1; +drop table if exists t14; +create table t14 (id int auto_increment,k int,c char(120),key idx_1(id)) auto_id_cache 100; +drop table if exists t15; +create table t15 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) ; +drop table if exists t16; +create table t16 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) engine = MyISAM; +drop table if exists t17; +create table t17 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) engine = InnoDB; +drop table if exists t18; +create table t18 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) auto_id_cache 1; +drop table if exists t19; +create table t19 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) auto_id_cache 100; +create table tt1 (id int); +alter table tt1 add column (c int auto_increment); +Error 8200 (HY000): unsupported add column 'c' constraint AUTO_INCREMENT when altering 'executor__autoid.tt1' +create table tt2 (id int, c int auto_increment, key c_idx(c)); +alter table tt2 drop index c_idx; +drop table if exists t_473; +create table t_473 (id int key auto_increment); +insert into t_473 values (); +select * from t_473; +id +1 +show table t_473 next_row_id; +DB_NAME TABLE_NAME COLUMN_NAME NEXT_GLOBAL_ROW_ID ID_TYPE +executor__autoid t_473 id 30001 _TIDB_ROWID +executor__autoid t_473 id 1 AUTO_INCREMENT +alter table t_473 auto_id_cache = 100; +show table t_473 next_row_id; +DB_NAME TABLE_NAME COLUMN_NAME NEXT_GLOBAL_ROW_ID ID_TYPE +executor__autoid t_473 id 30001 _TIDB_ROWID +executor__autoid t_473 id 1 AUTO_INCREMENT +insert into t_473 values (); +select * from t_473; +id +1 +30001 +show table t_473 next_row_id; +DB_NAME TABLE_NAME COLUMN_NAME NEXT_GLOBAL_ROW_ID ID_TYPE +executor__autoid t_473 id 30101 _TIDB_ROWID +executor__autoid t_473 id 1 AUTO_INCREMENT +alter table t_473 auto_id_cache 
= 1; +Error 1105 (HY000): Can't Alter AUTO_ID_CACHE between 1 and non-1, the underlying implementation is different +drop table if exists io; +set auto_increment_offset = 10; +set auto_increment_increment = 5; +create table io (a int key auto_increment); +insert into io values (null),(null),(null); +select * from io; +a +10 +15 +20 +drop table io; +create table io (a int key auto_increment) AUTO_ID_CACHE 1; +insert into io values (null),(null),(null); +select * from io; +a +10 +15 +20 +drop table io; +create table io (a int key auto_increment); +set auto_increment_offset = 10; +set auto_increment_increment = 2; +insert into io values (),(),(); +select * from io; +a +10 +12 +14 +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +a +15 +20 +25 +delete from io; +set auto_increment_increment = 10; +insert into io values (),(),(); +select * from io; +a +30 +40 +50 +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +a +55 +60 +65 +drop table io; +create table io (a int key auto_increment) AUTO_ID_CACHE 1; +set auto_increment_offset = 10; +set auto_increment_increment = 2; +insert into io values (),(),(); +select * from io; +a +10 +12 +14 +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +a +15 +20 +25 +delete from io; +set auto_increment_increment = 10; +insert into io values (),(),(); +select * from io; +a +30 +40 +50 +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +a +55 +60 +65 +drop table io; +set auto_increment_offset = 10; +set auto_increment_increment = 2; +create table io (a int, b int auto_increment, key(b)); +insert into io(b) values (null),(null),(null); +select b from io; +b +10 +12 +14 +select _tidb_rowid from io; +_tidb_rowid +15 +16 +17 +delete from io; +set auto_increment_increment = 10; +insert into io(b) values (null),(null),(null); +select b from io; +b +20 +30 +40 +select _tidb_rowid from io; +_tidb_rowid +41 +42 +43 +drop table io; +set auto_increment_offset = 10; +set auto_increment_increment = 2; +create table io (a int, b int auto_increment, key(b)) AUTO_ID_CACHE 1; +insert into io(b) values (null),(null),(null); +select b from io; +b +10 +12 +14 +select _tidb_rowid from io; +_tidb_rowid +1 +2 +3 +delete from io; +set auto_increment_increment = 10; +insert into io(b) values (null),(null),(null); +select b from io; +b +20 +30 +40 +select _tidb_rowid from io; +_tidb_rowid +4 +5 +6 +drop table io; +set auto_increment_offset = -1; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect auto_increment_offset value: '-1' +set auto_increment_increment = -2; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect auto_increment_increment value: '-2' +show variables like 'auto_increment%'; +Variable_name Value +auto_increment_increment 1 +auto_increment_offset 1 +set auto_increment_offset = 65536; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect auto_increment_offset value: '65536' +set auto_increment_increment = 65536; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect auto_increment_increment value: '65536' +show variables like 'auto_increment%'; +Variable_name Value +auto_increment_increment 65535 +auto_increment_offset 65535 +set auto_increment_offset = default; +set auto_increment_increment = default; diff --git a/tests/integrationtest/r/executor/charset.result 
b/tests/integrationtest/r/executor/charset.result index 6736319dce5a8..18314b1606ec1 100644 --- a/tests/integrationtest/r/executor/charset.result +++ b/tests/integrationtest/r/executor/charset.result @@ -102,3 +102,21 @@ a 中文 b 一二三 一二三 一二三 中文 中文 +select 'a' collate utf8_roman_ci; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +select cast('a' as char) collate utf8_roman_ci; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set names utf8 collate utf8_roman_ci; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set session collation_server = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set session collation_database = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set session collation_connection = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set global collation_server = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set global collation_database = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' +set global collation_connection = 'utf8_roman_ci'; +Error 1273 (HY000): Unsupported collation when new collation is enabled: 'utf8_roman_ci' diff --git a/tests/integrationtest/r/executor/chunk_reuse.result b/tests/integrationtest/r/executor/chunk_reuse.result new file mode 100644 index 0000000000000..0c0cd608e2725 --- /dev/null +++ b/tests/integrationtest/r/executor/chunk_reuse.result @@ -0,0 +1,338 @@ +drop table if exists t1, t2, t3, t4; +create table t1 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2)); +insert into t1 (id1,id2)values(1,1); +insert into t1 (id1,id2)values(2,2),(3,3); +create table t2 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),PRIMARY KEY (`id1`) clustered,key id2(id2)); +insert into t2 (id1,id2)values(1,1); +insert into t2 (id1,id2)values(2,2),(3,3); +explain format='brief' select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'; +id estRows task access object operator info +HashJoin 11111111.11 root CARTESIAN inner join +├─IndexReader(Build) 3333.33 root index:IndexRangeScan +│ └─IndexRangeScan 3333.33 cop[tikv] table:t2, index:id2(id2) range:("1",+inf], keep order:false, stats:pseudo +└─IndexReader(Probe) 3333.33 root index:IndexRangeScan + └─IndexRangeScan 3333.33 cop[tikv] table:t1, index:id2(id2) range:("1",+inf], keep order:false, stats:pseudo +select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'; +id1 +2 +2 +3 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id2 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id2 +2 2 +2 2 +3 3 +3 3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id3 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id3 +2 NULL +2 NULL +3 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id4 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id4 +2 NULL +2 NULL +3 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id5 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id5 +2 NULL +2 NULL +3 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select 
t1.id1,t1.id6 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id6 +2 NULL +2 NULL +3 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id7 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +id1 id7 +2 NULL +2 NULL +3 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +explain format='brief' select t1.id1 from t1,t2 where t1.id2 > '1'and t1.id1 = t2.id1; +id estRows task access object operator info +MergeJoin 4166.67 root inner join, left key:executor__chunk_reuse.t1.id1, right key:executor__chunk_reuse.t2.id1 +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo +└─TableReader(Probe) 3333.33 root data:Selection + └─Selection 3333.33 cop[tikv] gt(executor__chunk_reuse.t1.id2, "1") + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select t1.id1 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1 ,t1.id3 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 id3 +2 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1 ,t1.id4 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 id4 +2 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1 ,t1.id5 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 id5 +2 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1 ,t1.id6 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 id6 +2 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1 ,t1.id7 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +id1 id7 +2 NULL +3 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +explain format='brief' select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1; +id estRows task access object operator info +HashJoin 1.00 root CARTESIAN inner join +├─Point_Get(Build) 1.00 root table:t2 handle:1 +└─Point_Get(Probe) 1.00 root table:t1 handle:1 +select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1; +id1 +1 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id2 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id2 +1 1 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id3 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id3 +1 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id4 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id4 +1 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id5 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id5 +1 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id6 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id6 +1 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id7 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +id1 id7 +1 NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +explain format='brief' select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2' ; +id estRows task access object operator info +HashJoin 100.00 root CARTESIAN inner join +├─IndexLookUp(Build) 10.00 root +│ ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t2, index:id2(id2) range:["2","2"], keep order:false, stats:pseudo +│ └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─IndexLookUp(Probe) 10.00 root + ├─IndexRangeScan(Build) 10.00 cop[tikv] table:t1, index:id2(id2) 
range:["1","1"], keep order:false, stats:pseudo + └─TableRowIDScan(Probe) 10.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id6 id6 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select t1.id1,t1.id3 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id3 id6 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id4 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id4 id6 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id5 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id5 id6 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id7 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id7 id6 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select t1.id1,t1.id6 ,t2.id3 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +id1 id6 id3 +1 NULL NULL +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +create table t3 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2),key id8(id8)); +insert into t3 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3); +explain format='brief' select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7); +id estRows task access object operator info +HashAgg 8878.22 root group by:Column#17, funcs:firstrow(Column#17)->Column#17 +└─Union 11097.78 root + ├─Projection 5548.89 root executor__chunk_reuse.t3.id1->Column#17 + │ └─IndexMerge 5548.89 root type: union + │ ├─IndexRangeScan(Build) 3333.33 cop[tikv] table:t3, index:id2(id2) range:("3",+inf], keep order:false, stats:pseudo + │ ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t3, index:id8(id8) range:[-inf,10), keep order:false, stats:pseudo + │ └─TableRowIDScan(Probe) 5548.89 cop[tikv] table:t3 keep order:false, stats:pseudo + └─Projection 5548.89 root executor__chunk_reuse.t3.id1->Column#17 + └─IndexMerge 5548.89 root type: union + ├─IndexRangeScan(Build) 3333.33 cop[tikv] table:t3, index:id2(id2) range:("4",+inf], keep order:false, stats:pseudo + ├─IndexRangeScan(Build) 3323.33 cop[tikv] table:t3, index:id8(id8) range:[-inf,7), keep order:false, stats:pseudo + └─TableRowIDScan(Probe) 5548.89 cop[tikv] table:t3 keep order:false, stats:pseudo +select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7); +id1 +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id3 from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id4 from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id5 from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id6 from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id7 from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +set 
tidb_enable_clustered_index = OFF; +create table t4 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`),key id2(id2),key id8(id8,id2)); +insert into t4 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3); +explain format='brief' select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4'); +id estRows task access object operator info +HashAgg 5333.33 root group by:Column#19, funcs:firstrow(Column#19)->Column#19 +└─Union 6666.67 root + ├─IndexReader 3333.33 root index:IndexRangeScan + │ └─IndexRangeScan 3333.33 cop[tikv] table:t4, index:id2(id2) range:("3",+inf], keep order:false, stats:pseudo + └─IndexReader 3333.33 root index:IndexRangeScan + └─IndexRangeScan 3333.33 cop[tikv] table:t4, index:id2(id2) range:("4",+inf], keep order:false, stats:pseudo +select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4'); +id2 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id3) from t3 where id2 > '4' or id8 < 7); +id1 +NULL +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id2) from t3 where id2 > '4' or id8 < 7); +id1 +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +1 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and id3 is null); +id1 +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and char_length(id3) > 0); +id1 +1 +2 +3 +select @@last_sql_use_alloc; +@@last_sql_use_alloc +0 +set tidb_enable_clustered_index = default; diff --git a/tests/integrationtest/r/executor/cluster_table.result b/tests/integrationtest/r/executor/cluster_table.result new file mode 100644 index 0000000000000..e42ad209ed983 --- /dev/null +++ b/tests/integrationtest/r/executor/cluster_table.result @@ -0,0 +1,77 @@ +drop table if exists test_func_encode_sql_digest; +create table test_func_encode_sql_digest(id int primary key, v int); +select tidb_encode_sql_digest("begin"); +tidb_encode_sql_digest("begin") +e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5 +select tidb_encode_sql_digest("select @@tidb_current_ts"); +tidb_encode_sql_digest("select @@tidb_current_ts") +58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44 +select tidb_encode_sql_digest("select id, v from test_func_decode_sql_digests where id = 1 for update"); +tidb_encode_sql_digest("select id, v from test_func_decode_sql_digests where id = 1 for update") +7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9 +select tidb_encode_sql_digest(null); +tidb_encode_sql_digest(null) +NULL +select tidb_encode_sql_digest(); +Error 1582 (42000): Incorrect parameter count in the call to native function 'tidb_encode_sql_digest' +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1;'); +(select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1;') +1 +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1 ;'); +(select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1 ;') +1 +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 2 ;'); +(select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 2 ;') +1 +drop user if exists 'testuser'@'localhost'; 
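+-- A hedged aside, not generated test output: the digest equalities above hold
+-- because tidb_encode_sql_digest hashes the normalized statement, with literals
+-- replaced by ? and whitespace collapsed, so differing literals yield the same
+-- digest. A minimal illustrative check, under that assumption:
+--   select tidb_encode_sql_digest('select 1') = tidb_encode_sql_digest('select  999 ;');
+--   -- expected result: 1, since both statements normalize to `select ?`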
+create user 'testuser'@'localhost'; +select tidb_decode_sql_digests('["aa"]'); +Error 1227 (42000): Access denied; you need (at least one of) the PROCESS privilege(s) for this operation +grant process on *.* to 'testuser'@'localhost'; +select tidb_decode_sql_digests('["aa"]'); +tidb_decode_sql_digests('["aa"]') +[null] +drop user 'testuser'@'localhost'; +set global tidb_enable_stmt_summary = 1; +select @@global.tidb_enable_stmt_summary; +@@global.tidb_enable_stmt_summary +1 +drop table if exists test_func_decode_sql_digests; +create table test_func_decode_sql_digests(id int primary key, v int); +begin; +select @@tidb_current_ts; +select id, v from test_func_decode_sql_digests where id = 1 for update; +id v +rollback; +select tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 0); +tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 0) +[null,"select @@tidb_current_ts","select `id` , `v` from `test_func_decode_sql_digests` where `id` = ? for update"] +select tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 24); +tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 24) +[null,"select @@tidb_current_ts","select `id` , `v` from `..."] +select tidb_decode_sql_digests('[]'); +tidb_decode_sql_digests('[]') +[] +select tidb_decode_sql_digests(null); +tidb_decode_sql_digests(null) +NULL +select tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5",1,null,"58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44",{"a":1},[2],"7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9","","abcde"]'); +tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5",1,null,"58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44",{"a":1},[2],"7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9","","abcde" +[null,null,null,"select @@tidb_current_ts",null,null,"select `id` , `v` from `test_func_decode_sql_digests` where `id` = ? 
for update",null,null] +select tidb_decode_sql_digests('{"a":1}'); +tidb_decode_sql_digests('{"a":1}') +NULL +show warnings; +Level Code Message +Warning 1210 The argument can't be unmarshalled as JSON array: '{"a":1}' +select tidb_decode_sql_digests('aabbccdd'); +tidb_decode_sql_digests('aabbccdd') +NULL +show warnings; +Level Code Message +Warning 1210 The argument can't be unmarshalled as JSON array: 'aabbccdd' +select tidb_decode_sql_digests('a', 1, 2); +Error 1582 (42000): Incorrect parameter count in the call to native function 'tidb_decode_sql_digests' +select tidb_decode_sql_digests(); +Error 1582 (42000): Incorrect parameter count in the call to native function 'tidb_decode_sql_digests' +set global tidb_enable_stmt_summary = default; diff --git a/tests/integrationtest/r/executor/compact_table.result b/tests/integrationtest/r/executor/compact_table.result new file mode 100644 index 0000000000000..d4cff588896bc --- /dev/null +++ b/tests/integrationtest/r/executor/compact_table.result @@ -0,0 +1,42 @@ +drop table if exists bar, foo; +alter table test compact tiflash replica; +Error 1046 (3D000): No database selected +alter table executor__compact_table.foo compact tiflash replica; +Error 1146 (42S02): Table 'executor__compact_table.foo' doesn't exist +use executor__compact_table; +alter table bar compact; +Error 1146 (42S02): Table 'executor__compact_table.bar' doesn't exist +drop table if exists t; +create table t(a int); +alter table t compact tiflash replica; +show warnings; +Level Code Message +Warning 1105 compact skipped: no tiflash replica in the table +alter table executor__compact_table.t compact; +show warnings; +Level Code Message +Warning 1105 compact skipped: no tiflash replica in the table +alter table executor__compact_table.t compact; +show warnings; +Level Code Message +Warning 1105 compact skipped: no tiflash replica in the table +drop table if exists t; +create table t(a int); +alter table t compact partition p1,p2 tiflash replica; +Error 1105 (HY000): table:t is not a partition table, but user specify partition name list:[p1 p2] +drop table if exists t; +CREATE TABLE t ( +id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, +fname VARCHAR(25) NOT NULL, +lname VARCHAR(25) NOT NULL, +store_id INT NOT NULL, +department_id INT NOT NULL +) +PARTITION BY RANGE(id) ( +PARTITION p0 VALUES LESS THAN (5), +PARTITION p1 VALUES LESS THAN (10), +PARTITION p2 VALUES LESS THAN (15), +PARTITION p3 VALUES LESS THAN MAXVALUE +); +alter table t compact partition p1,p2,p4 tiflash replica; +Error 1735 (HY000): Unknown partition 'p4' in table 't' diff --git a/tests/integrationtest/r/executor/cte.result b/tests/integrationtest/r/executor/cte.result new file mode 100644 index 0000000000000..2721c2f4b9c10 --- /dev/null +++ b/tests/integrationtest/r/executor/cte.result @@ -0,0 +1,407 @@ +with recursive cte1 as (select 1 c1 union all select c1 + 1 c1 from cte1 where c1 < 5) select * from cte1; +c1 +1 +2 +3 +4 +5 +with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 10) select * from cte1 order by c1; +c1 +1 +2 +2 +3 +3 +4 +4 +5 +5 +6 +6 +7 +7 +8 +8 +9 +9 +10 +10 +with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 3 union all select c1 + 2 c1 from cte1 where c1 < 5) select * from cte1 order by c1; +c1 +1 +2 +2 +3 +3 +3 +4 +4 +5 +5 +5 +6 +6 +drop table if exists t1; +create table t1(a int); +insert into t1 values(1); +insert into t1 values(2); +SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn 
AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0) SELECT * FROM qn WHERE b=a); +a +1 +SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a ); +a +1 +2 +with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2; +a b +1 1 +2 1 +3 1 +4 1 +5 1 +with recursive cte1(c1) as (select 1 union select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; +c1 +1 +2 +3 +with recursive cte1(c1) as (select 1 union all select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; +c1 +1 +2 +3 +drop table if exists t1; +create table t1(c1 int, c2 int); +insert into t1 values(1, 1), (1, 2), (2, 2); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from t1) select * from cte1 order by c1; +c1 +1 +2 +3 +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (1), (1), (2), (2), (2); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 4) select * from cte1 order by c1; +c1 +1 +2 +3 +4 +set @@cte_max_recursion_depth = -1; +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 100) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +set @@cte_max_recursion_depth = 0; +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 1 iterations. Try increasing @@cte_max_recursion_depth to a larger value +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +set @@cte_max_recursion_depth = 1; +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; +c1 +1 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; +c1 +1 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 2) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 2 iterations. 
Try increasing @@cte_max_recursion_depth to a larger value +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +c1 +1 +2 +set @@cte_max_recursion_depth = default; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1; +c1 +1 +2 +3 +4 +5 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 1) select * from cte1; +c1 +2 +3 +4 +5 +6 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 10) select * from cte1; +c1 +11 +12 +13 +14 +15 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 995) select * from cte1; +c1 +996 +997 +998 +999 +1000 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 6) select * from cte1; +c1 +7 +8 +9 +10 +11 +set cte_max_recursion_depth=2; +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +c1 +2 +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 3 iterations. Try increasing @@cte_max_recursion_depth to a larger value +set cte_max_recursion_depth=1000; +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 996) select * from cte1; +c1 +996 +997 +998 +999 +1000 +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 997) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 1001 iterations. Try increasing @@cte_max_recursion_depth to a larger value +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; +c1 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 10) select * from cte1; +c1 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 order by dt1.c1, dt2.c1; +c1 c1 +2 2 +2 3 +3 2 +3 3 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1 order by dt1.c1, dt1.c1; +c1 c1 +2 2 +3 3 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where c1 in (select 2); +c1 +2 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 dt where c1 in (select c1 from cte1 where 1 = dt.c1 - 1); +c1 +2 +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where cte1.c1 = (select dt1.c1 from cte1 dt1 where dt1.c1 = cte1.c1); +c1 +2 +3 +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (2), (3); +with recursive cte1(c1) as (select c1 from t1 limit 1 offset 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; +Error 1221 (HY000): Incorrect usage of UNION and LIMIT +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 1 offset 1) select * from cte1; +c1 +2 +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 0 offset 1) select * from cte1; +c1 +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 2 offset 0) select * from cte1; +c1 +1 +2 +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), 
(16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), (33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128), (129), (130), (131), (132), (133), (134), (135), (136), (137), (138), (139), (140), (141), (142), (143), (144), (145), (146), (147), (148), (149), (150), (151), (152), (153), (154), (155), (156), (157), (158), (159), (160), (161), (162), (163), (164), (165), (166), (167), (168), (169), (170), (171), (172), (173), (174), (175), (176), (177), (178), (179), (180), (181), (182), (183), (184), (185), (186), (187), (188), (189), (190), (191), (192), (193), (194), (195), (196), (197), (198), (199), (200), (201), (202), (203), (204), (205), (206), (207), (208), (209), (210), (211), (212), (213), (214), (215), (216), (217), (218), (219), (220), (221), (222), (223), (224), (225), (226), (227), (228), (229), (230), (231), (232), (233), (234), (235), (236), (237), (238), (239), (240), (241), (242), (243), (244), (245), (246), (247), (248), (249), (250), (251), (252), (253), (254), (255), (256), (257), (258), (259), (260), (261), (262), (263), (264), (265), (266), (267), (268), (269), (270), (271), (272), (273), (274), (275), (276), (277), (278), (279), (280), (281), (282), (283), (284), (285), (286), (287), (288), (289), (290), (291), (292), (293), (294), (295), (296), (297), (298), (299); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1) select * from cte1; +c1 +0 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1; +c1 +100 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 5 offset 100) select * from cte1; +c1 +100 +101 +102 +103 +104 +with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1; +c1 +1 +2 +with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +c1 c1 +1 1 +2 2 +with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +c1 c1 +with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1; +c1 +100 +101 +102 +with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +c1 c1 +100 100 +101 101 +102 102 +set cte_max_recursion_depth = 0; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(0); +with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 0) select * from cte1; +c1 +with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 1) select * from cte1; +Error 3636 (HY000): Recursive query aborted after 1 iterations. 
Try increasing @@cte_max_recursion_depth to a larger value +set cte_max_recursion_depth = 1000; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (2), (3); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 2) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +c1 +3 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 2) select * from cte1; +c1 +3 +4 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 2) select * from cte1; +c1 +3 +4 +5 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 2) select * from cte1; +c1 +3 +4 +5 +6 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 3) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +c1 +4 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 3) select * from cte1; +c1 +4 +5 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 3) select * from cte1; +c1 +4 +5 +6 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 3) select * from cte1; +c1 +4 +5 +6 +7 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 4) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 4) select * from cte1; +c1 +5 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 4) select * from cte1; +c1 +5 +6 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 4) select * from cte1; +c1 +5 +6 +7 +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 4) select * from cte1; +c1 +5 +6 +7 +8 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 2) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +c1 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 2) select * from cte1; +c1 +3 +2 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 2) select * from cte1; +c1 +3 +2 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 2) select * from cte1; +c1 +3 +2 +3 +4 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 3) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +c1 +2 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 3) select * from cte1; +c1 +2 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 3) select * from cte1; +c1 +2 +3 +4 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 3) select * from cte1; +c1 +2 +3 +4 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 4) select * from cte1; +c1 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 
limit 1 offset 4) select * from cte1; +c1 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 4) select * from cte1; +c1 +3 +4 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 4) select * from cte1; +c1 +3 +4 +3 +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1; +c1 +3 +4 +3 +4 +set cte_max_recursion_depth = default; +drop table if exists executor__cte.t; +drop view if exists executor__cte.v; +create database if not exists executor__cte1; +create table executor__cte.t (a int); +create table executor__cte1.t (a int); +insert into executor__cte.t values (1); +insert into executor__cte1.t values (2); +create definer='root'@'localhost' view executor__cte.v as with tt as (select * from t) select * from tt; +select * from executor__cte.v; +a +1 +use executor__cte1; +select * from executor__cte.v; +a +1 +use executor__cte; +drop database executor__cte1; diff --git a/tests/integrationtest/r/executor/executor_txn.result b/tests/integrationtest/r/executor/executor_txn.result new file mode 100644 index 0000000000000..d1083e9cd6351 --- /dev/null +++ b/tests/integrationtest/r/executor/executor_txn.result @@ -0,0 +1,169 @@ +drop table if exists t; +create table t(id int, a int, unique index idx(id)); +begin pessimistic; +insert into t values (1,1); +savepoint s1; +insert into t values (2,2); +rollback to s1; +insert into t values (2,2); +select * from t; +id a +1 1 +2 2 +rollback to s1; +select * from t; +id a +1 1 +commit; +select * from t; +id a +1 1 +delete from t; +insert into t values (1,1); +begin pessimistic; +delete from t where id = 1; +savepoint s1; +insert into t values (1,2); +rollback to s1; +select * from t; +id a +commit; +select * from t; +id a +drop table if exists t; +CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a)); +begin pessimistic; +savepoint sp0; +insert ignore into t values ( 'B' ),( 'C' ); +update t set a = 'C' where a = 'B'; +Error 1062 (23000): Duplicate entry 'C' for key 't.idx_1' +select * from t where a = 'B' for update; +a +B +rollback to sp0; +delete from t where a = 'B' ; +rollback; +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode=''; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +id u v +1 11 101 +2 22 202 +commit; +select * from tmp1 
order by id; +id u v +1 11 101 +2 22 202 +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode=''; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +id u v +2 22 202 +3 33 303 +rollback to sp1; +select * from tmp1 order by id; +id u v +2 22 202 +commit; +select * from tmp1 order by id; +id u v +set session tidb_txn_mode=default; diff --git a/tests/integrationtest/r/executor/explain.result b/tests/integrationtest/r/executor/explain.result new file mode 100644 index 0000000000000..b1753240540db --- /dev/null +++ b/tests/integrationtest/r/executor/explain.result @@ -0,0 +1,302 @@ +drop table if exists t; +create table t (v int); +explain format = 'brief' select * from t t1, t t2; +id estRows task access object operator info +HashJoin 100000000.00 root CARTESIAN inner join +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v > t1.v); +id estRows task access object operator info +HashJoin 7992.00 root CARTESIAN semi join, other cond:gt(executor__explain.t.v, executor__explain.t.v) +├─TableReader(Build) 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__explain.t.v)) +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__explain.t.v)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v in (t1.v+1, t1.v+2)); +id estRows task access object operator info +HashJoin 8000.00 root CARTESIAN semi join, other cond:in(executor__explain.t.v, plus(executor__explain.t.v, 1), plus(executor__explain.t.v, 2)) +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root 
data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +explain format = 'brief' select * from t t1, t t2 where t1.v = t2.v; +id estRows task access object operator info +HashJoin 12487.50 root inner join, equal:[eq(executor__explain.t.v, executor__explain.t.v)] +├─TableReader(Build) 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__explain.t.v)) +│ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo +└─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__explain.t.v)) + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +drop table if exists t; +create table t (a int); +explain analyze insert into t select 1; +select * from t; +a +1 +explain analyze update t set a=2 where a=1; +select * from t; +a +2 +explain format = 'brief' insert into t select 1; +select * from t; +a +2 +explain analyze insert into t select 1; +explain analyze replace into t values (3); +select * from t order by a; +a +1 +2 +3 +desc format='brief' select * from information_schema.statements_summary; +id estRows task access object operator info +MemTableScan 10000.00 root table:STATEMENTS_SUMMARY +desc format='brief' select * from information_schema.statements_summary where digest is null; +id estRows task access object operator info +Selection 8000.00 root isnull(Column#5) +└─MemTableScan 10000.00 root table:STATEMENTS_SUMMARY +desc format='brief' select * from information_schema.statements_summary where digest = 'abcdefg'; +id estRows task access object operator info +MemTableScan 10000.00 root table:STATEMENTS_SUMMARY digests: ["abcdefg"] +desc format='brief' select * from information_schema.statements_summary where digest in ('a','b','c'); +id estRows task access object operator info +MemTableScan 10000.00 root table:STATEMENTS_SUMMARY digests: ["a","b","c"] +drop table if exists tt123; +CREATE TABLE tt123 ( +id int(11) NOT NULL, +a bigint(20) DEFAULT NULL, +b char(20) DEFAULT NULL, +c datetime DEFAULT NULL, +d double DEFAULT NULL, +e json DEFAULT NULL, +f decimal(40,6) DEFAULT NULL, +PRIMARY KEY (id) /*T![clustered_index] CLUSTERED */, +KEY a (a), +KEY b (b), +KEY c (c), +KEY d (d), +KEY f (f) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +explain format='brief' select /*+ inl_hash_join(t1) */ * from tt123 t1 join tt123 t2 on t1.b=t2.e; +id estRows task access object operator info +Projection 12500.00 root executor__explain.tt123.id, executor__explain.tt123.a, executor__explain.tt123.b, executor__explain.tt123.c, executor__explain.tt123.d, executor__explain.tt123.e, executor__explain.tt123.f, executor__explain.tt123.id, executor__explain.tt123.a, executor__explain.tt123.b, executor__explain.tt123.c, executor__explain.tt123.d, executor__explain.tt123.e, executor__explain.tt123.f +└─HashJoin 12500.00 root inner join, equal:[eq(executor__explain.tt123.e, Column#15)] + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─Projection(Probe) 10000.00 root executor__explain.tt123.id, executor__explain.tt123.a, executor__explain.tt123.b, executor__explain.tt123.c, executor__explain.tt123.d, executor__explain.tt123.e, executor__explain.tt123.f, cast(executor__explain.tt123.b, json BINARY)->Column#15 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +drop table if exists t; +create table t 
(a int primary key); +insert into t values (2); +set @@tidb_constraint_check_in_place=1; +explain analyze insert into t values (1), (2), (3); +Error 1062 (23000): Duplicate entry '2' for key 't.PRIMARY' +select * from t; +a +2 +set @@tidb_constraint_check_in_place=DEFAULT; +drop table if exists t; +create table t(a int); +set @@session.tidb_enable_non_prepared_plan_cache = 1; +select * from t limit 1; +a +select * from t limit 1; +a +explain format = 'plan_cache' select * from (select * from t) t1 limit 1; +id estRows task access object operator info +Limit_8 1.00 root offset:0, count:1 +└─TableReader_12 1.00 root data:Limit_11 + └─Limit_11 1.00 cop[tikv] offset:0, count:1 + └─TableFullScan_10 1.00 cop[tikv] table:t keep order:false, stats:pseudo +show warnings; +Level Code Message +Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported +explain format = 'plan_cache' select * from (select * from t) t1 limit 1; +id estRows task access object operator info +Limit_8 1.00 root offset:0, count:1 +└─TableReader_12 1.00 root data:Limit_11 + └─Limit_11 1.00 cop[tikv] offset:0, count:1 + └─TableFullScan_10 1.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1; +show warnings; +Level Code Message +Warning 1105 skip non-prepared plan-cache: queries that have sub-queries are not supported +explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1; +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'plan_cache' select * from t; +id estRows task access object operator info +TableReader_5 10000.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +show warnings; +Level Code Message +explain format = 'plan_cache' select * from t; +id estRows task access object operator info +TableReader_5 10000.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +1 +explain analyze format = 'plan_cache' select * from t; +show warnings; +Level Code Message +explain analyze format = 'plan_cache' select * from t; +select @@last_plan_from_cache; +@@last_plan_from_cache +1 +explain select * from t; +id estRows task access object operator info +TableReader_5 10000.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'brief' select * from t; +id estRows task access object operator info +TableReader 10000.00 root data:TableFullScan +└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'dot' select * from t; +dot contents + +digraph TableReader_5 { +subgraph cluster5{ +node [style=filled, color=lightgrey] +color=black +label = "root" +"TableReader_5" +} +subgraph cluster4{ +node [style=filled, color=lightgrey] +color=black +label = "cop" +"TableFullScan_4" +} +"TableReader_5" -> "TableFullScan_4" +} + +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'hint' select * from t; +hint +use_index(@`sel_1` `executor__explain`.`t` ) +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'row' select * from t; +id estRows task access object operator info +TableReader_5 10000.00 root 
data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'verbose' select * from t; +id estRows estCost task access object operator info +TableReader_5 10000.00 177906.67 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 2035000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'traditional' select * from t; +id estRows task access object operator info +TableReader_5 10000.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'binary' select * from t; +binary plan +zQFYCsoBCg1UYWJsZVJlYWRlcl81EncKD1QBEVBGdWxsU2Nhbl80IQEAAAA4DT9BKQABAfBGiMNAOAJAAkoYChYKEWV4ZWN1dG9yX19leHBsYWluEgF0Uh5rZWVwIG9yZGVyOmZhbHNlLCBzdGF0czpwc2V1ZG9w//////8BAwQBeAEGBQEgASFVVVVVlbcFHWYoAUABUhRkYXRhOlQ2kgAAcAUzAQEsAXj///////////8B +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'tidb_json' select * from t; +TiDB_JSON +[ + { + "id": "TableReader_5", + "estRows": "10000.00", + "taskType": "root", + "operatorInfo": "data:TableFullScan_4", + "subOperators": [ + { + "id": "TableFullScan_4", + "estRows": "10000.00", + "taskType": "cop[tikv]", + "accessObject": "table:t", + "operatorInfo": "keep order:false, stats:pseudo" + } + ] + } +] + +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +explain format = 'cost_trace' select * from t; +id estRows estCost costFormula task access object operator info +TableReader_5 10000.00 177906.67 ((scan(10000*logrowsize(32)*tikv_scan_factor(40.7))) + (net(10000*rowsize(16)*tidb_kv_net_factor(3.96))))/15.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 2035000.00 scan(10000*logrowsize(32)*tikv_scan_factor(40.7)) cop[tikv] table:t keep order:false, stats:pseudo +select @@last_plan_from_cache; +@@last_plan_from_cache +0 +set @@session.tidb_enable_non_prepared_plan_cache = DEFAULT; +drop table if exists t; +drop view if exists v; +drop user if exists 'explain'@'%'; +create table t (id int); +create view v as select * from t; +create user 'explain'@'%'; +grant select on executor__explain.v to 'explain'@'%'; +show databases; +Database +INFORMATION_SCHEMA +executor__explain +use executor__explain; +select * from v; +id +explain format = 'brief' select * from v; +Error 1345 (HY000): EXPLAIN/SHOW can not be issued; lacking privileges for underlying table +grant show view on executor__explain.v to 'explain'@'%'; +explain format = 'brief' select * from v; +id estRows task access object operator info +TableReader 10000.00 root data:TableFullScan +└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +revoke select on executor__explain.v from 'explain'@'%'; +explain format = 'brief' select * from v; +Error 1142 (42000): SELECT command denied to user 'explain'@'%' for table 'v' +create table t1 (i int); +create table t2 (j int); +create table t3 (k int, secret int); +create view v1 as select * from t1; +create view v2 as select * from v1, t2; +create view v3 as select k from t3; +grant select, show view on executor__explain.v2 to 'explain'@'%'; +grant show view on executor__explain.v1 to 'explain'@'%'; +grant select, show view on executor__explain.t3 to 'explain'@'%'; +grant select, show view on executor__explain.v3 to 'explain'@'%'; +explain select * from v1; +Error 1142 (42000): SELECT command denied to user 
'explain'@'%' for table 'v1' +explain select * from v2; +Error 1345 (HY000): EXPLAIN/SHOW can not be issued; lacking privileges for underlying table +explain select * from t3; +id estRows task access object operator info +TableReader_5 10000.00 root data:TableFullScan_4 +└─TableFullScan_4 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo +explain select * from v3; +id estRows task access object operator info +TableReader_7 10000.00 root data:TableFullScan_6 +└─TableFullScan_6 10000.00 cop[tikv] table:t3 keep order:false, stats:pseudo diff --git a/tests/integrationtest/r/executor/explainfor.result b/tests/integrationtest/r/executor/explainfor.result new file mode 100644 index 0000000000000..b6a8f1d0b2f34 --- /dev/null +++ b/tests/integrationtest/r/executor/explainfor.result @@ -0,0 +1,836 @@ +desc format='brief' select * from METRICS_SCHEMA.tidb_query_duration where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ; +id estRows task access object operator info +MemTableScan 10000.00 root table:tidb_query_duration PromQL:histogram_quantile(0.9, sum(rate(tidb_server_handle_query_duration_seconds_bucket{}[60s])) by (le,sql_type,instance)), start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s +desc format='brief' select * from METRICS_SCHEMA.up where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ; +id estRows task access object operator info +MemTableScan 10000.00 root table:up PromQL:up{}, start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, step:1m0s +desc format='brief' select * from information_schema.cluster_log where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +id estRows task access object operator info +MemTableScan 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13 +desc format='brief' select * from information_schema.cluster_log where level in ('warn','error') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +id estRows task access object operator info +MemTableScan 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, log_levels:["error","warn"] +desc format='brief' select * from information_schema.cluster_log where type in ('high_cpu_1','high_memory_1') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +id estRows task access object operator info +MemTableScan 10000.00 root table:CLUSTER_LOG start_time:2019-12-23 16:10:13, end_time:2019-12-23 16:30:13, node_types:["high_cpu_1","high_memory_1"] +desc format='brief' select * from information_schema.slow_query; +id estRows task access object operator info +MemTableScan 10000.00 root table:SLOW_QUERY only search in the current 'tidb-slow.log' file +desc format='brief' select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +id estRows task access object operator info +MemTableScan 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000 +set @@time_zone = '+00:00'; +desc format='brief' select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +id estRows task access object operator info +MemTableScan 10000.00 root table:SLOW_QUERY start_time:2019-12-23 16:10:13.000000, end_time:2019-12-23 16:30:13.000000 +set @@time_zone = default; +desc format='brief' select * from information_schema.cluster_config where type in ('tikv', 'tidb'); +id estRows task access object 
operator info +MemTableScan 10000.00 root table:CLUSTER_CONFIG node_types:["tidb","tikv"] +desc format='brief' select * from information_schema.cluster_config where instance='192.168.1.7:2379'; +id estRows task access object operator info +MemTableScan 10000.00 root table:CLUSTER_CONFIG instances:["192.168.1.7:2379"] +desc format='brief' select * from information_schema.cluster_config where type='tidb' and instance='192.168.1.7:2379'; +id estRows task access object operator info +MemTableScan 10000.00 root table:CLUSTER_CONFIG node_types:["tidb"], instances:["192.168.1.7:2379"] +desc format='brief' select * from information_schema.inspection_result where rule = 'ddl' and rule = 'config'; +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RESULT skip_inspection:true +desc format='brief' select * from information_schema.inspection_result where rule in ('ddl', 'config'); +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:[] +desc format='brief' select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool'); +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RESULT rules:[], items:["ddl.lease","raftstore.threadpool"] +desc format='brief' select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool') and rule in ('ddl', 'config'); +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RESULT rules:["config","ddl"], items:["ddl.lease","raftstore.threadpool"] +desc format='brief' select * from information_schema.inspection_rules where type='inspection'; +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RULES node_types:["inspection"] +desc format='brief' select * from information_schema.inspection_rules where type='inspection' or type='summary'; +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RULES node_types:["inspection","summary"] +desc format='brief' select * from information_schema.inspection_rules where type='inspection' and type='summary'; +id estRows task access object operator info +MemTableScan 10000.00 root table:INSPECTION_RULES skip_request: true +set tidb_enable_prepared_plan_cache=1; +drop table if exists t; +create table t(a int, b int, c int generated always as (a+b) stored); +insert into t(a,b) values(1,1); +begin; +update t set b = 2 where a = 1; +prepare stmt from 'select b from t where a > ?'; +set @p = 0; +execute stmt using @p; +b +2 +set @p = 1; +execute stmt using @p; +b +insert into t(a,b,c) values(3,3,3); +Error 3105 (HY000): The value specified for generated column 'c' in table 't' is not allowed. 
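+-- A hedged aside, not generated test output: Error 3105 above is raised because
+-- an explicit value may not be supplied for the stored generated column c.
+-- Under that assumption, either of these illustrative forms would be accepted:
+--   insert into t(a,b) values(3,3);            -- c is computed as a+b
+--   insert into t(a,b,c) values(3,3,DEFAULT);  -- DEFAULT is the only allowed explicit value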
+rollback;
+set tidb_enable_prepared_plan_cache=default;
+desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TABLE_STORAGE_STATS schema:["information_schema"]
+desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_NAME = 'schemata';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TABLE_STORAGE_STATS table:["schemata"]
+desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema' and TABLE_NAME = 'schemata';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TABLE_STORAGE_STATS schema:["information_schema"], table:["schemata"]
+desc format='brief' select * from information_schema.inspection_summary where rule='ddl';
+id estRows task access object operator info
+Selection 8000.00 root eq(Column#1, "ddl")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"]
+desc format='brief' select * from information_schema.inspection_summary where 'ddl'=rule or rule='config';
+id estRows task access object operator info
+Selection 8000.00 root or(eq("ddl", Column#1), eq(Column#1, "config"))
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl"]
+desc format='brief' select * from information_schema.inspection_summary where 'ddl'=rule or rule='config' or rule='slow_query';
+id estRows task access object operator info
+Selection 8000.00 root or(eq("ddl", Column#1), or(eq(Column#1, "config"), eq(Column#1, "slow_query")))
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["config","ddl","slow_query"]
+desc format='brief' select * from information_schema.inspection_summary where (rule='config' or rule='slow_query') and (metrics_name='metric_name3' or metrics_name='metric_name1');
+id estRows task access object operator info
+Selection 8000.00 root or(eq(Column#1, "config"), eq(Column#1, "slow_query")), or(eq(Column#3, "metric_name3"), eq(Column#3, "metric_name1"))
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["config","slow_query"], metric_names:["metric_name1","metric_name3"]
+desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query');
+id estRows task access object operator info
+Selection 8000.00 root in(Column#1, "ddl", "slow_query")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"]
+desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name='metric_name1';
+id estRows task access object operator info
+Selection 8000.00 root eq(Column#3, "metric_name1"), in(Column#1, "ddl", "slow_query")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1"]
+desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name in ('metric_name1', 'metric_name2');
+id estRows task access object operator info
+Selection 8000.00 root in(Column#1, "ddl", "slow_query"), in(Column#3, "metric_name1", "metric_name2")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl","slow_query"], metric_names:["metric_name1","metric_name2"]
+desc format='brief' select * from information_schema.inspection_summary where rule='ddl' and metrics_name in ('metric_name1', 'metric_name2');
+id estRows task access object operator info
+Selection 8000.00 root eq(Column#1, "ddl"), in(Column#3, "metric_name1", "metric_name2")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name1","metric_name2"]
+desc format='brief' select * from information_schema.inspection_summary where rule='ddl' and metrics_name='metric_NAME3';
+id estRows task access object operator info
+Selection 8000.00 root eq(Column#1, "ddl"), eq(Column#3, "metric_NAME3")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["ddl"], metric_names:["metric_name3"]
+desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'config') and rule in ('slow_query', 'config');
+id estRows task access object operator info
+Selection 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["config"]
+desc format='brief' select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and rule in ('ddl', 'config') and rule in ('slow_query', 'config') and quantile in (0.80, 0.90);
+id estRows task access object operator info
+Selection 8000.00 root in(Column#1, "ddl", "config"), in(Column#1, "slow_query", "config"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name4")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY rules:["config"], metric_names:["metric_name4"], quantiles:[0.800000,0.900000]
+desc format='brief' select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name1') and metrics_name in ('metric_name1', 'metric_name3');
+id estRows task access object operator info
+Selection 8000.00 root in(Column#3, "metric_name1", "metric_name3"), in(Column#3, "metric_name1", "metric_name4"), in(Column#3, "metric_name5", "metric_name1"), in(Column#3, "metric_name5", "metric_name4")
+└─MemTableScan 10000.00 root table:INSPECTION_SUMMARY skip_inspection: true
+desc format='brief' select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '192.168.1.7:3930';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_TABLES tiflash_instances:["192.168.1.7:3930"]
+desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '192.168.1.7:3930';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_SEGMENTS tiflash_instances:["192.168.1.7:3930"]
+desc format='brief' select * from information_schema.TIFLASH_TABLES where TIDB_DATABASE = 'test';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_TABLES tidb_databases:["test"]
+desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIDB_DATABASE = 'test';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_SEGMENTS tidb_databases:["test"]
+desc format='brief' select * from information_schema.TIFLASH_TABLES where TIDB_TABLE = 't';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_TABLES tidb_tables:["t"]
+desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIDB_TABLE = 't';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_SEGMENTS tidb_tables:["t"]
+desc format='brief' select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '192.168.1.7:3930' and TIDB_DATABASE = 'test' and TIDB_TABLE = 't';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_TABLES tiflash_instances:["192.168.1.7:3930"], tidb_databases:["test"], tidb_tables:["t"]
+desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '192.168.1.7:3930' and TIDB_DATABASE = 'test' and TIDB_TABLE = 't';
+id estRows task access object operator info
+MemTableScan 10000.00 root table:TIFLASH_SEGMENTS tiflash_instances:["192.168.1.7:3930"], tidb_databases:["test"], tidb_tables:["t"]
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists t1, t2;
+CREATE TABLE `t1` (a int);
+CREATE TABLE `t2` (a int);
+insert into t1 values(1), (2);
+insert into t2 values(1), (3);
+prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';
+set @a=0, @b=1;
+execute stmt using @a, @b;
+a
+1
+2
+3
+execute stmt using @b, @a;
+a
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @b, @b;
+a
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @a, @a;
+a
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select * from t1 where a > ? union all select * from t2 where a > ?;';
+set @a=0, @b=1;
+execute stmt using @a, @b;
+a
+1
+2
+3
+execute stmt using @b, @a;
+a
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @b, @b;
+a
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @a, @a;
+a
+1
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select * from t1 where a > ? except select * from t2 where a > ?;';
+set @a=0, @b=1;
+execute stmt using @a, @a;
+a
+2
+execute stmt using @b, @a;
+a
+2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @b, @b;
+a
+2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @a, @b;
+a
+1
+2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;';
+set @a=0, @b=1;
+execute stmt using @a, @a;
+a
+1
+2
+3
+execute stmt using @b, @a;
+a
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @b, @b;
+a
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt using @a, @b;
+a
+1
+2
+3
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select * from t1 union all select * from t1 intersect select * from t2;';
+execute stmt;
+a
+1
+1
+2
+prepare stmt from '(select * from t1 union all select * from t1) intersect select * from t2;';
+execute stmt;
+a
+1
+prepare stmt from '(select * from t1 union all select * from t1 intersect select * from t2) order by a limit 2;';
+execute stmt;
+a
+1
+1
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists t;
+create table t(a int, index idx_a(a));
+prepare stmt from 'select * from t;';
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t;';
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists t;
+CREATE TABLE t(c1 INT, index idx_c(c1));
+prepare stmt from 'select * from t use index(idx_c) where c1 > 1;';
+execute stmt;
+c1
+execute stmt;
+c1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+ALTER TABLE t ALTER INDEX idx_c INVISIBLE;
+select * from t use index(idx_c) where c1 > 1;
+Error 1176 (42000): Key 'idx_c' doesn't exist in table 't'
+execute stmt;
+Error 1176 (42000): Key 'idx_c' doesn't exist in table 't'
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+prepare stmt from 'with recursive cte1 as (select ? c1 union all select c1 + 1 c1 from cte1 where c1 < ?) select * from cte1;';
+set @a=5, @b=4, @c=2, @d=1;
+execute stmt using @d, @a;
+c1
+1
+2
+3
+4
+5
+execute stmt using @d, @b;
+c1
+1
+2
+3
+4
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt using @c, @b;
+c1
+2
+3
+4
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+prepare stmt from 'with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < ?) select * from cte1 order by c1;';
+set @a=10, @b=2;
+execute stmt using @a;
+c1
+1
+2
+2
+3
+3
+4
+4
+5
+5
+6
+6
+7
+7
+8
+8
+9
+9
+10
+10
+execute stmt using @b;
+c1
+1
+2
+2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+prepare stmt from 'with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < ? union all select c1 + ? c1 from cte1 where c1 < ?) select * from cte1 order by c1;';
+set @a=1, @b=2, @c=3, @d=4, @e=5;
+execute stmt using @c, @b, @e;
+c1
+1
+2
+2
+3
+3
+3
+4
+4
+5
+5
+5
+6
+6
+execute stmt using @b, @a, @d;
+c1
+1
+2
+2
+2
+3
+3
+3
+4
+4
+4
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+drop table if exists t1;
+create table t1(a int);
+insert into t1 values(1);
+insert into t1 values(2);
+prepare stmt from 'SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*? AS b UNION ALL SELECT b+? FROM qn WHERE b=?) SELECT * FROM qn WHERE b=a);';
+show warnings;
+Level Code Message
+Warning 1105 skip prepared plan-cache: find table executor__explainfor.qn failed: [schema:1146]Table 'executor__explainfor.qn' doesn't exist
+set @a=1, @b=2, @c=3, @d=4, @e=5, @f=0;
+execute stmt using @f, @a, @f;
+a
+1
+execute stmt using @a, @b, @a;
+a
+1
+2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt using @a, @b, @a;
+a
+1
+2
+prepare stmt from 'with recursive c(p) as (select ?), cte(a, b) as (select 1, 1 union select a+?, 1 from cte, c where a < ?) select * from cte order by 1, 2;';
+show warnings;
+Level Code Message
+Warning 1105 skip prepared plan-cache: find table executor__explainfor.cte failed: [schema:1146]Table 'executor__explainfor.cte' doesn't exist
+execute stmt using @a, @a, @e;
+a b
+1 1
+2 1
+3 1
+4 1
+5 1
+execute stmt using @b, @b, @c;
+a b
+1 1
+3 1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists t;
+create table t(a int);
+prepare stmt from 'select * from t;';
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+drop database if exists plan_cache;
+create database plan_cache;
+use plan_cache;
+create table t(a int);
+insert into t values(1);
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt from 'select * from t;';
+execute stmt;
+a
+1
+execute stmt;
+a
+1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt;
+a
+1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+set @@session.tidb_enable_list_partition=1;
+drop table if exists t;
+create table t(a int, b int) PARTITION BY LIST (a) ( PARTITION p0 VALUES IN (1, 2, 3), PARTITION p1 VALUES IN (4, 5, 6));
+set @@tidb_partition_prune_mode='static';
+prepare stmt from 'select * from t;';
+execute stmt;
+a b
+execute stmt;
+a b
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set @@session.tidb_enable_list_partition=default;
+set @@tidb_partition_prune_mode=default;
+drop table if exists t12, t97;
+CREATE TABLE t12(a INT, b INT);
+CREATE TABLE t97(a INT, b INT UNIQUE NOT NULL);
+EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 on t12.b = t97.b;
+id estRows task access object operator info
+TableReader_7 10000.00 root data:TableFullScan_6
+└─TableFullScan_6 10000.00 cop[tikv] table:t12 keep order:false, stats:pseudo
+EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 use index () on t12.b = t97.b;
+id estRows task access object operator info
+TableReader_7 10000.00 root data:TableFullScan_6
+└─TableFullScan_6 10000.00 cop[tikv] table:t12 keep order:false, stats:pseudo
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists t;
+create table t(a int);
+prepare stmt from 'select * from t;';
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+set tidb_enable_prepared_plan_cache=1;
+execute stmt;
+Error 8111 (HY000): Prepared statement not found
+prepare stmt from 'select * from t;';
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop view if exists view1, view2, view3, view4;
+drop table if exists view_t;
+create table view_t (a int,b int);
+insert into view_t values(1,2);
+create definer='root'@'localhost' view view1 as select * from view_t;
+create definer='root'@'localhost' view view2(c,d) as select * from view_t;
+create definer='root'@'localhost' view view3(c,d) as select a,b from view_t;
+create definer='root'@'localhost' view view4 as select * from (select * from (select * from view_t) tb1) tb;
+prepare stmt1 from 'select * from view1;';
+execute stmt1;
+a b
+1 2
+execute stmt1;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt2 from 'select * from view2;';
+execute stmt2;
+c d
+1 2
+execute stmt2;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt3 from 'select * from view3;';
+execute stmt3;
+c d
+1 2
+execute stmt3;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+prepare stmt4 from 'select * from view4;';
+execute stmt4;
+a b
+1 2
+execute stmt4;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+drop table view_t;
+create table view_t(c int,d int);
+execute stmt1;
+Error 1356 (HY000): View 'plan_cache.view1' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+execute stmt2;
+Error 1356 (HY000): View 'plan_cache.view2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+execute stmt3;
+Error 1356 (HY000): View 'plan_cache.view3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them
+drop table view_t;
+create table view_t(a int,b int,c int);
+insert into view_t values(1,2,3);
+execute stmt1;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt1;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt2;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt2;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt3;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt3;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt4;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt4;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+alter table view_t drop column a;
+alter table view_t add column a int after b;
+update view_t set a=1;
+execute stmt1;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt1;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt2;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt2;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt3;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt3;
+c d
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt4;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt4;
+a b
+1 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+drop table view_t;
+drop view view1,view2,view3,view4;
+set @@tidb_enable_window_function = 1;
+drop table if exists t;
+create table t(a int, b int);
+insert into t values (1,1),(1,2),(2,1),(2,2);
+create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t;
+prepare stmt from 'select * from v;';
+execute stmt;
+a first_value(a) over(rows between 1 preceding and 1 following) last_value(a) over(rows between 1 preceding and 1 following)
+1 1 1
+1 1 2
+2 1 2
+2 2 2
+execute stmt;
+a first_value(a) over(rows between 1 preceding and 1 following) last_value(a) over(rows between 1 preceding and 1 following)
+1 1 1
+1 1 2
+2 1 2
+2 2 2
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+drop view v;
+set @@tidb_enable_window_function = default;
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+drop table if exists t;
+create table t(a int, index idx_a(a));
+drop table if exists r;
+create table r(a int);
+prepare stmt from 'select * from t;';
+create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t;
+execute stmt;
+a
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt;
+a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
+create binding for select * from t using select /*+ ignore_plan_cache() */ * from t;
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt;
+a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
+create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t;
+execute stmt;
+a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt;
+a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
+prepare stmt_join from 'select * from t, r where r.a = t.a;';
+create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;
+execute stmt_join;
+a a
+execute stmt_join;
+a a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt_join;
+a a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
+create binding for select * from t, r where r.a = t.a using select /*+ ignore_plan_cache() */* from t, r where r.a = t.a;
+execute stmt_join;
+a a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt_join;
+a a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
+create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;
+execute stmt_join;
+a a
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+execute stmt_join;
+a a
+select @@last_plan_from_binding;
+@@last_plan_from_binding
+1
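The explainfor cases above observe the prepared-plan cache through two session variables: @@last_plan_from_cache flips to 1 once a repeated EXECUTE reuses the cached plan, and @@last_plan_from_binding reports whether a binding drove the plan. A minimal sketch of that probe pattern, with a hypothetical table t and constant that are illustrative assumptions, not taken from the recorded output:

-- illustrative probe only; table t and the predicate are assumptions
prepare p from 'select * from t where a > ?';
set @v = 1;
execute p using @v;
execute p using @v;
select @@last_plan_from_cache;    -- 1 once the second run hits the cache
create binding for select * from t where a > 1
using select /*+ ignore_plan_cache() */ * from t where a > 1;
execute p using @v;
select @@last_plan_from_cache;    -- 0: the ignore_plan_cache() binding disables caching
select @@last_plan_from_binding;  -- 1: the binding was applied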
diff --git a/tests/integrationtest/r/executor/grant.result b/tests/integrationtest/r/executor/grant.result
new file mode 100644
index 0000000000000..6fffe53952a96
--- /dev/null
+++ b/tests/integrationtest/r/executor/grant.result
@@ -0,0 +1,332 @@
+drop user if exists 'testWithGrant'@'localhost';
+CREATE USER 'testWithGrant'@'localhost' IDENTIFIED BY '123';
+SELECT * FROM mysql.db WHERE User="testWithGrant" and host="localhost";
+Host DB User Select_priv Insert_priv Update_priv Delete_priv Create_priv Drop_priv Grant_priv References_priv Index_priv Alter_priv Create_tmp_table_priv Lock_tables_priv Create_view_priv Show_view_priv Create_routine_priv Alter_routine_priv Execute_priv Event_priv Trigger_priv
+GRANT select ON executor__grant.* TO 'testWithGrant'@'localhost' WITH GRANT OPTION;
+SELECT grant_priv FROM mysql.DB WHERE User="testWithGrant" and host="localhost" and db="executor__grant";
+grant_priv
+Y
+drop user if exists 'testWithGrant1';
+CREATE USER 'testWithGrant1';
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+grant_priv
+N
+GRANT ALL ON *.* TO 'testWithGrant1';
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+grant_priv
+N
+GRANT ALL ON *.* TO 'testWithGrant1' WITH GRANT OPTION;
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+grant_priv
+Y
+drop user if exists 'dduser'@'%';
+drop DATABASE if exists `dddb_%`;
+CREATE USER 'dduser'@'%' IDENTIFIED by '123456';
+CREATE DATABASE `dddb_%`;
+CREATE table `dddb_%`.`te%` (id int);
+GRANT ALL PRIVILEGES ON `dddb_%`.* TO 'dduser'@'%';
+GRANT ALL PRIVILEGES ON `dddb_%`.`te%` to 'dduser'@'%';
+DROP USER IF EXISTS 'test'@'%';
+SET sql_mode='NO_AUTO_CREATE_USER';
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx';
+Error 1410 (42000): You are not allowed to create a user with GRANT
+set sql_mode=default;
+DROP USER IF EXISTS 'test'@'%';
+SET SQL_MODE='';
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx';
+SELECT user FROM mysql.user WHERE user='test' and host='%';
+user
+test
+DROP USER IF EXISTS 'test'@'%';
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%';
+SELECT user, plugin FROM mysql.user WHERE user='test' and host='%';
+user plugin
+test mysql_native_password
+DROP USER IF EXISTS 'test'@'%';
+set sql_mode=default;
+CREATE USER '1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost';
+Error 1470 (HY000): String '1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost' is too long for user name (should be no longer than 32)
+CREATE USER 'some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890X';
+Error 1470 (HY000): String 'some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890' is too long for user name (should be no longer than 32)
+drop role if exists r1, r2, r3, r4;
+create role r1, r2, r3;
+create table executor__grant.testatomic(x int);
+grant update, select, insert, delete on *.* to r1, r2, r4;
+Error 1410 (42000): You are not allowed to create a user with GRANT
+select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Update_priv Select_priv Insert_priv Delete_priv
+N N N N
+N N N N
+N N N N
+grant update, select, insert, delete on *.* to r1, r2, r3;
+revoke all on *.* from r1, r2, r4, r3;
+Error 1105 (HY000): Unknown user: r4@%
+select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Update_priv Select_priv Insert_priv Delete_priv
+Y Y Y Y
+Y Y Y Y
+Y Y Y Y
+grant update, select, insert, delete on executor__grant.* to r1, r2, r4;
+Error 1410 (42000): You are not allowed to create a user with GRANT
+select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Update_priv Select_priv Insert_priv Delete_priv
+grant update, select, insert, delete on executor__grant.* to r1, r2, r3;
+revoke all on *.* from r1, r2, r4, r3;
+Error 1105 (HY000): Unknown user: r4@%
+select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Update_priv Select_priv Insert_priv Delete_priv
+Y Y Y Y
+Y Y Y Y
+Y Y Y Y
+grant update, select, insert, delete on executor__grant.testatomic to r1, r2, r4;
+Error 1410 (42000): You are not allowed to create a user with GRANT
+select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Table_priv
+grant update, select, insert, delete on executor__grant.testatomic to r1, r2, r3;
+revoke all on *.* from r1, r2, r4, r3;
+Error 1105 (HY000): Unknown user: r4@%
+select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%";
+Table_priv
+Select,Insert,Update,Delete
+Select,Insert,Update,Delete
+Select,Insert,Update,Delete
+drop role if exists r1, r2, r3, r4;
+drop table executor__grant.testatomic;
+DROP USER IF EXISTS 'test'@'%';
+CREATE USER 'test'@'%' IDENTIFIED BY 'test';
+GRANT SELECT ON executor__grant.* to 'test';
+SELECT user,host FROM mysql.user WHERE user='test' and host='%';
+user host
+test %
+SET SQL_MODE='ANSI_QUOTES';
+GRANT ALL PRIVILEGES ON video_ulimit.* TO web@'%' IDENTIFIED BY 'eDrkrhZ>l2sV';
+REVOKE ALL PRIVILEGES ON video_ulimit.* FROM web@'%';
+DROP USER IF EXISTS 'web'@'%';
+set sql_mode=default;
+DROP USER if exists 'ssl_auser'@'%';
+DROP USER if exists 'ssl_buser'@'%';
+DROP USER if exists 'ssl_cuser'@'%';
+DROP USER if exists 'ssl_duser'@'%';
+DROP USER if exists 'ssl_euser'@'%';
+DROP USER if exists 'ssl_fuser'@'%';
+DROP USER if exists 'ssl_guser'@'%';
+drop user if exists 'u1'@'%';
+drop user if exists 'u2'@'%';
+drop user if exists 'u3'@'%';
+CREATE USER 'ssl_auser'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256';
+CREATE USER 'ssl_buser'@'%' require subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256';
+CREATE USER 'ssl_cuser'@'%' require cipher 'AES128-GCM-SHA256';
+CREATE USER 'ssl_duser'@'%';
+CREATE USER 'ssl_euser'@'%' require none;
+CREATE USER 'ssl_fuser'@'%' require ssl;
+CREATE USER 'ssl_guser'@'%' require x509;
+select * from mysql.global_priv where `user` like 'ssl_%';
+Host User Priv
+% ssl_auser {"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256","x509_issuer":"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US","x509_subject":"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH"}
+% ssl_buser {"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256","x509_subject":"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH"}
+% ssl_cuser {"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256"}
+% ssl_duser {}
+% ssl_euser {}
+% ssl_fuser {"ssl_type":1}
+% ssl_guser {"ssl_type":2}
+CREATE USER 'u1'@'%';
+GRANT ALL ON *.* TO 'u1'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' and subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH';
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1';
+priv
+{"ssl_type":3,"x509_issuer":"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US","x509_subject":"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH"}
+GRANT ALL ON *.* TO 'u1'@'%' require cipher 'AES128-GCM-SHA256';
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1';
+priv
+{"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256"}
+GRANT select ON *.* TO 'u1'@'%';
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1';
+priv
+{"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256"}
+GRANT ALL ON *.* TO 'u1'@'%' require none;
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1';
+priv
+{}
+CREATE USER 'u2'@'%';
+alter user 'u2'@'%' require ssl;
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2';
+priv
+{"ssl_type":1}
+alter user 'u2'@'%' require x509;
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2';
+priv
+{"ssl_type":2}
+alter user 'u2'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256';
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2';
+priv
+{"ssl_type":3,"ssl_cipher":"AES128-GCM-SHA256","x509_issuer":"/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US","x509_subject":"/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH"}
+alter user 'u2'@'%' require none;
+select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2';
+priv
+{}
+CREATE USER 'u3'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256';
+show create user 'u3';
+CREATE USER for u3@%
+CREATE USER 'u3'@'%' IDENTIFIED WITH 'mysql_native_password' AS '' REQUIRE CIPHER 'AES128-GCM-SHA256' ISSUER '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' SUBJECT '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' PASSWORD EXPIRE DEFAULT ACCOUNT UNLOCK PASSWORD HISTORY DEFAULT PASSWORD REUSE INTERVAL DEFAULT
+CREATE USER 'u4'@'%' require issuer 'CN=TiDB,OU=PingCAP';
+Error 1105 (HY000): invalid X509_NAME input: CN=TiDB,OU=PingCAP
+CREATE USER 'u5'@'%' require subject '/CN=TiDB\OU=PingCAP';
+Error 1105 (HY000): invalid X509_NAME input: /CN=TiDBOU=PingCAP
+CREATE USER 'u6'@'%' require subject '/CN=TiDB\NC=PingCAP';
+Error 1105 (HY000): invalid X509_NAME input: /CN=TiDBNC=PingCAP
+CREATE USER 'u7'@'%' require cipher 'AES128-GCM-SHA1';
+Error 1105 (HY000): Unsupported cipher suit: AES128-GCM-SHA1
+CREATE USER 'u8'@'%' require subject '/CN';
+Error 1105 (HY000): invalid X509_NAME input: /CN
+CREATE USER 'u9'@'%' require cipher 'TLS_AES_256_GCM_SHA384' cipher 'RC4-SHA';
+Error 1105 (HY000): Duplicate require CIPHER clause
+CREATE USER 'u9'@'%' require issuer 'CN=TiDB,OU=PingCAP' issuer 'CN=TiDB,OU=PingCAP2';
+Error 1105 (HY000): Duplicate require ISSUER clause
+CREATE USER 'u9'@'%' require subject '/CN=TiDB\OU=PingCAP' subject '/CN=TiDB\OU=PingCAP2';
+Error 1105 (HY000): Duplicate require SUBJECT clause
+CREATE USER 'u9'@'%' require ssl ssl;
+[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 36 near "ssl;"
+CREATE USER 'u9'@'%' require x509 x509;
+[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 38 near "x509;"
+drop user if exists 'maint_auth_str1'@'%';
+CREATE USER 'maint_auth_str1'@'%' IDENTIFIED BY 'foo';
+SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1';
+authentication_string
+*F3A2A51A9B0F2BE2468926B4132313728C250DBF
+ALTER USER 'maint_auth_str1'@'%' REQUIRE SSL;
+SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1';
+authentication_string
+*F3A2A51A9B0F2BE2468926B4132313728C250DBF
+drop table if exists xx;
+drop user if exists 'sync_ci_data'@'%';
+create table xx (id int);
+CREATE USER 'sync_ci_data'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';
+GRANT USAGE ON *.* TO 'sync_ci_data'@'%';
+GRANT USAGE ON sync_ci_data.* TO 'sync_ci_data'@'%';
+GRANT USAGE ON executor__grant.* TO 'sync_ci_data'@'%';
+GRANT USAGE ON executor__grant.xx TO 'sync_ci_data'@'%';
+drop user if exists issue27867;
+create user issue27867;
+grant all on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant all on PERFormanCE_scHemA.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'PERFormanCE_scHemA'
+grant select on performance_schema.* to issue27867;
+grant insert on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant update on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant delete on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant drop on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant lock tables on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant create on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant references on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant alter on PERFormAnCE_scHemA.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'PERFormAnCE_scHemA'
+grant execute on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant index on PERFormanCE_scHemA.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'PERFormanCE_scHemA'
+grant create view on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+grant show view on performance_schema.* to issue27867;
+Error 1044 (42000): Access denied for user 'root'@'%' to database 'performance_schema'
+drop user issue27867;
+drop user if exists dyn;
+create user dyn;
+GRANT BACKUP_ADMIN ON executor__grant.* TO dyn;
+Error 3619 (HY000): Illegal privilege level specified for BACKUP_ADMIN
+GRANT BOGUS_GRANT ON *.* TO dyn;
+Error 3929 (HY000): Dynamic privilege 'BOGUS_GRANT' is not registered with the server.
+GRANT BACKUP_Admin ON *.* TO dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN N
+GRANT SYSTEM_VARIABLES_ADMIN, BACKUP_ADMIN ON *.* TO dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN N
+dyn % SYSTEM_VARIABLES_ADMIN N
+GRANT ROLE_ADMIN, BACKUP_ADMIN ON *.* TO dyn WITH GRANT OPTION;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN Y
+dyn % ROLE_ADMIN Y
+dyn % SYSTEM_VARIABLES_ADMIN N
+GRANT SYSTEM_VARIABLES_ADMIN, Select, ROLE_ADMIN ON *.* TO dyn;
+SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn';
+Grant_Priv
+N
+SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='SYSTEM_VARIABLES_ADMIN';
+WITH_GRANT_OPTION
+N
+GRANT CONNECTION_ADMIN, Insert ON *.* TO dyn WITH GRANT OPTION;
+SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn';
+Grant_Priv
+Y
+SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='CONNECTION_ADMIN';
+WITH_GRANT_OPTION
+Y
+drop user if exists u29302;
+create user u29302;
+grant create temporary tables on NotExistsD29302.NotExistsT29302 to u29302;
+Error 1144 (42000): Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used
+grant lock tables on executor__grant.NotExistsT29302 to u29302;
+Error 1144 (42000): Illegal GRANT/REVOKE command; please consult the manual to see which privileges can be used
+grant create temporary tables (NotExistsCol) on NotExistsD29302.NotExistsT29302 to u29302;
+Error 1221 (HY000): Incorrect usage of COLUMN GRANT and NON-COLUMN PRIVILEGES
+drop user u29302;
+drop table if exists t1;
+drop user if exists user_1@localhost;
+CREATE USER user_1@localhost;
+CREATE TABLE T1(f1 INT);
+CREATE TABLE t1(f1 INT);
+Error 1050 (42S01): Table 'executor__grant.t1' already exists
+GRANT SELECT ON T1 to user_1@localhost;
+GRANT SELECT ON t1 to user_1@localhost;
+DROP USER IF EXISTS test;
+CREATE USER test;
+GRANT SELECT ON `mysql`.`db` TO test;
+SELECT `Grantor` FROM `mysql`.`tables_priv` WHERE User = 'test';
+Grantor
+root@%
+drop user if exists genius;
+drop user if exists u29268;
+create user genius;
+select * from nonexist;
+Error 1146 (42S02): Table 'executor__grant.nonexist' doesn't exist
+grant Select,Insert on nonexist to 'genius';
+Error 1146 (42S02): Table 'executor__grant.nonexist' doesn't exist
+create table if not exists xx (id int);
+grant Select,Insert on XX to 'genius';
+grant Select,Insert on xx to 'genius';
+grant Select,Update on executor__grant.xx to 'genius';
+CREATE DATABASE d29268;
+USE d29268;
+CREATE USER u29268;
+GRANT SELECT ON t29268 TO u29268;
+Error 1146 (42S02): Table 'd29268.t29268' doesn't exist
+GRANT DROP, INSERT ON t29268 TO u29268;
+Error 1146 (42S02): Table 'd29268.t29268' doesn't exist
+GRANT UPDATE, CREATE VIEW, SHOW VIEW ON t29268 TO u29268;
+Error 1146 (42S02): Table 'd29268.t29268' doesn't exist
+GRANT DELETE, REFERENCES, ALTER ON t29268 TO u29268;
+Error 1146 (42S02): Table 'd29268.t29268' doesn't exist
+GRANT CREATE ON t29268 TO u29268;
+GRANT CREATE, SELECT ON t29268 TO u29268;
+GRANT CREATE, DROP, INSERT ON t29268 TO u29268;
+CREATE TABLE t29268 (c1 int);
+INSERT INTO t29268 VALUES (1), (2);
+SELECT c1 FROM t29268;
+c1
+1
+2
+DROP TABLE t29268;
+GRANT ALL ON t29268 TO u29268;
+DROP USER u29268;
+DROP DATABASE IF EXISTS d29268;
+use executor__grant;
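Two behaviors anchor the grant.result cases above: GRANT and REVOKE are atomic over their user lists (a single unknown user such as r4 fails the whole statement, leaving the other grantees unchanged), and dynamic privileges such as BACKUP_ADMIN are recorded in mysql.global_grants and can only be granted at the global *.* level. A sketch of how one might probe both behaviors interactively; the user name probe and database some_db are illustrative assumptions:

-- illustrative only; user probe and some_db are assumptions
CREATE USER probe;
GRANT BACKUP_ADMIN ON some_db.* TO probe;  -- rejected: dynamic privileges are global-only (Error 3619)
GRANT BACKUP_ADMIN ON *.* TO probe;        -- accepted, lands in mysql.global_grants
SELECT * FROM mysql.global_grants WHERE `User` = 'probe';
DROP USER probe;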
diff --git a/tests/integrationtest/r/executor/import_into.result b/tests/integrationtest/r/executor/import_into.result
new file mode 100644
index 0000000000000..ce7896e9cd4fb
--- /dev/null
+++ b/tests/integrationtest/r/executor/import_into.result
@@ -0,0 +1,170 @@
+drop table if exists t;
+create table t (id int);
+BEGIN;
+IMPORT INTO t FROM '/file.csv';
+Error 1105 (HY000): cannot run IMPORT INTO in explicit transaction
+commit;
+drop table if exists t;
+create table t (id int);
+import into t from '/file.csv' with xx=1;
+Error 8163 (HY000): Unknown option xx
+import into t from '/file.csv' with detached=1;
+Error 8164 (HY000): Invalid option value for detached
+import into t from '/file.csv' with character_set;
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with detached, detached;
+Error 8165 (HY000): Option detached specified more than once
+import into t from '/file.csv' with character_set=true;
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with character_set=null;
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with character_set=1;
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with character_set=true;
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with character_set='';
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with character_set='aa';
+Error 8164 (HY000): Invalid option value for character_set
+import into t from '/file.csv' with fields_terminated_by=null;
+Error 8164 (HY000): Invalid option value for fields_terminated_by
+import into t from '/file.csv' with fields_terminated_by=1;
+Error 8164 (HY000): Invalid option value for fields_terminated_by
+import into t from '/file.csv' with fields_terminated_by=true;
+Error 8164 (HY000): Invalid option value for fields_terminated_by
+import into t from '/file.csv' with fields_terminated_by='';
+Error 8164 (HY000): Invalid option value for fields_terminated_by
+import into t from '/file.csv' with fields_enclosed_by=null;
+Error 8164 (HY000): Invalid option value for fields_enclosed_by
+import into t from '/file.csv' with fields_enclosed_by='aa';
+Error 8164 (HY000): Invalid option value for fields_enclosed_by
+import into t from '/file.csv' with fields_enclosed_by=1;
+Error 8164 (HY000): Invalid option value for fields_enclosed_by
+import into t from '/file.csv' with fields_enclosed_by=true;
+Error 8164 (HY000): Invalid option value for fields_enclosed_by
+import into t from '/file.csv' with fields_escaped_by=null;
+Error 8164 (HY000): Invalid option value for fields_escaped_by
+import into t from '/file.csv' with fields_escaped_by='aa';
+Error 8164 (HY000): Invalid option value for fields_escaped_by
+import into t from '/file.csv' with fields_escaped_by=1;
+Error 8164 (HY000): Invalid option value for fields_escaped_by
+import into t from '/file.csv' with fields_escaped_by=true;
+Error 8164 (HY000): Invalid option value for fields_escaped_by
+import into t from '/file.csv' with fields_defined_null_by=null;
+Error 8164 (HY000): Invalid option value for fields_defined_null_by
+import into t from '/file.csv' with fields_defined_null_by=1;
+Error 8164 (HY000): Invalid option value for fields_defined_null_by
+import into t from '/file.csv' with fields_defined_null_by=true;
+Error 8164 (HY000): Invalid option value for fields_defined_null_by
+import into t from '/file.csv' with lines_terminated_by=null;
+Error 8164 (HY000): Invalid option value for lines_terminated_by
+import into t from '/file.csv' with lines_terminated_by=1;
+Error 8164 (HY000): Invalid option value for lines_terminated_by
+import into t from '/file.csv' with lines_terminated_by=true;
+Error 8164 (HY000): Invalid option value for lines_terminated_by
+import into t from '/file.csv' with lines_terminated_by='';
+Error 8164 (HY000): Invalid option value for lines_terminated_by
+import into t from '/file.csv' with skip_rows=null;
+Error 8164 (HY000): Invalid option value for skip_rows
+import into t from '/file.csv' with skip_rows='';
+Error 8164 (HY000): Invalid option value for skip_rows
+import into t from '/file.csv' with skip_rows=-1;
+Error 8164 (HY000): Invalid option value for skip_rows
+import into t from '/file.csv' with skip_rows=true;
+Error 8164 (HY000): Invalid option value for skip_rows
+import into t from '/file.csv' with split_file='aa';
+Error 8164 (HY000): Invalid option value for split_file
+import into t from '/file.csv' with split_file, skip_rows=2;
+Error 8164 (HY000): Invalid option value for skip_rows, should be <= 1 when split-file is enabled
+import into t from '/file.csv' with disk_quota='aa';
+Error 8164 (HY000): Invalid option value for disk_quota
+import into t from '/file.csv' with disk_quota='220MiBxxx';
+Error 8164 (HY000): Invalid option value for disk_quota
+import into t from '/file.csv' with disk_quota=1;
+Error 8164 (HY000): Invalid option value for disk_quota
+import into t from '/file.csv' with disk_quota=false;
+Error 8164 (HY000): Invalid option value for disk_quota
+import into t from '/file.csv' with disk_quota=null;
+Error 8164 (HY000): Invalid option value for disk_quota
+import into t from '/file.csv' with thread='aa';
+Error 8164 (HY000): Invalid option value for thread
+import into t from '/file.csv' with thread=0;
+Error 8164 (HY000): Invalid option value for thread
+import into t from '/file.csv' with thread=false;
+Error 8164 (HY000): Invalid option value for thread
+import into t from '/file.csv' with thread=-100;
+Error 8164 (HY000): Invalid option value for thread
+import into t from '/file.csv' with thread=null;
+Error 8164 (HY000): Invalid option value for thread
+import into t from '/file.csv' with max_write_speed='aa';
+Error 8164 (HY000): Invalid option value for max_write_speed
+import into t from '/file.csv' with max_write_speed='11aa';
+Error 8164 (HY000): Invalid option value for max_write_speed
+import into t from '/file.csv' with max_write_speed=null;
+Error 8164 (HY000): Invalid option value for max_write_speed
+import into t from '/file.csv' with max_write_speed=-1;
+Error 8164 (HY000): Invalid option value for max_write_speed
+import into t from '/file.csv' with max_write_speed=false;
+Error 8164 (HY000): Invalid option value for max_write_speed
+import into t from '/file.csv' with checksum_table='';
+Error 8164 (HY000): Invalid option value for checksum_table
+import into t from '/file.csv' with checksum_table=123;
+Error 8164 (HY000): Invalid option value for checksum_table
+import into t from '/file.csv' with checksum_table=false;
+Error 8164 (HY000): Invalid option value for checksum_table
+import into t from '/file.csv' with checksum_table=null;
+Error 8164 (HY000): Invalid option value for checksum_table
+import into t from '/file.csv' with record_errors='aa';
+Error 8164 (HY000): Invalid option value for record_errors
+import into t from '/file.csv' with record_errors='111aa';
+Error 8164 (HY000): Invalid option value for record_errors
+import into t from '/file.csv' with record_errors=-123;
+Error 8164 (HY000): Invalid option value for record_errors
+import into t from '/file.csv' with record_errors=null;
+Error 8164 (HY000): Invalid option value for record_errors
+import into t from '/file.csv' with record_errors=true;
+Error 8164 (HY000): Invalid option value for record_errors
+import into t from '/file.csv' with cloud_storage_uri=123;
+Error 8164 (HY000): Invalid option value for cloud_storage_uri
+import into t from '/file.csv' with cloud_storage_uri=':';
+Error 8164 (HY000): Invalid option value for cloud_storage_uri
+import into t from '/file.csv' with cloud_storage_uri='sdsd';
+Error 8164 (HY000): Invalid option value for cloud_storage_uri
+import into t from '/file.csv' with cloud_storage_uri='http://sdsd';
+Error 8164 (HY000): Invalid option value for cloud_storage_uri
+import into t from '/file.csv' format 'parquet' with character_set='utf8';
+Error 8166 (HY000): Unsupported option character_set for non-CSV format
+import into t from '/file.csv' format 'sql' with character_set='utf8';
+Error 8166 (HY000): Unsupported option character_set for non-CSV format
+import into t from '/file.csv' format 'parquet' with fields_terminated_by='a';
+Error 8166 (HY000): Unsupported option fields_terminated_by for non-CSV format
+import into t from '/file.csv' format 'sql' with fields_terminated_by='a';
+Error 8166 (HY000): Unsupported option fields_terminated_by for non-CSV format
+import into t from '/file.csv' format 'parquet' with fields_enclosed_by='a';
+Error 8166 (HY000): Unsupported option fields_enclosed_by for non-CSV format
+import into t from '/file.csv' format 'sql' with fields_enclosed_by='a';
+Error 8166 (HY000): Unsupported option fields_enclosed_by for non-CSV format
+import into t from '/file.csv' format 'parquet' with fields_escaped_by='a';
+Error 8166 (HY000): Unsupported option fields_escaped_by for non-CSV format
+import into t from '/file.csv' format 'sql' with fields_escaped_by='a';
+Error 8166 (HY000): Unsupported option fields_escaped_by for non-CSV format
+import into t from '/file.csv' format 'parquet' with fields_defined_null_by='a';
+Error 8166 (HY000): Unsupported option fields_defined_null_by for non-CSV format
+import into t from '/file.csv' format 'sql' with fields_defined_null_by='a';
+Error 8166 (HY000): Unsupported option fields_defined_null_by for non-CSV format
+import into t from '/file.csv' format 'parquet' with lines_terminated_by='a';
+Error 8166 (HY000): Unsupported option lines_terminated_by for non-CSV format
+import into t from '/file.csv' format 'sql' with lines_terminated_by='a';
+Error 8166 (HY000): Unsupported option lines_terminated_by for non-CSV format
+import into t from '/file.csv' format 'parquet' with skip_rows=1;
+Error 8166 (HY000): Unsupported option skip_rows for non-CSV format
+import into t from '/file.csv' format 'sql' with skip_rows=1;
+Error 8166 (HY000): Unsupported option skip_rows for non-CSV format
+import into t from '/file.csv' format 'parquet' with split_file;
+Error 8166 (HY000): Unsupported option split_file for non-CSV format
+import into t from '/file.csv' format 'sql' with split_file;
+Error 8166 (HY000): Unsupported option split_file for non-CSV format
+import into t from '';
+Error 8156 (HY000): The value of INFILE must not be empty when LOAD DATA from LOCAL
+import into t from '/a.csv' format 'xx';
+Error 8157 (HY000): The FORMAT 'xx' is not supported
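The import_into.result cases above pin down the value types each IMPORT INTO option accepts and which options are CSV-only. For contrast, a well-formed invocation might look like the sketch below; the path and every option value are illustrative assumptions, not drawn from the recorded suite:

-- illustrative only; '/data/ok.csv' and all option values are assumptions
IMPORT INTO t FROM '/data/ok.csv'
WITH character_set='utf8', fields_terminated_by=',', fields_enclosed_by='"',
lines_terminated_by='\n', skip_rows=1, thread=8,
max_write_speed='100MiB', record_errors=100;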
diff --git a/tests/integrationtest/r/executor/index_advise.result b/tests/integrationtest/r/executor/index_advise.result
new file mode 100644
index 0000000000000..01b62af4a386c
--- /dev/null
+++ b/tests/integrationtest/r/executor/index_advise.result
@@ -0,0 +1,198 @@
+set @@session.tidb_opt_advanced_join_hint=0;
+drop table if exists t1, t2;
+create table t1(
+pnbrn_cnaps varchar(5) not null,
+new_accno varchar(18) not null,
+primary key(pnbrn_cnaps,new_accno) nonclustered
+);
+create table t2(
+pnbrn_cnaps varchar(5) not null,
+txn_accno varchar(18) not null,
+txn_dt date not null,
+yn_frz varchar(1) default null
+);
+insert into t1(pnbrn_cnaps,new_accno) values ("40001","123");
+insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0");
+set @@session.tidb_enable_inl_join_inner_multi_pattern='ON';
+explain format='brief' update
+/*+ inl_join(a) */
+t2 b,
+(
+select t1.pnbrn_cnaps,
+t1.new_accno
+from t1
+where t1.pnbrn_cnaps = '40001'
+) a
+set b.yn_frz = '1'
+where b.txn_dt = str_to_date('20221201', '%Y%m%d')
+and b.pnbrn_cnaps = a.pnbrn_cnaps
+and b.txn_accno = a.new_accno;
+id estRows task access object operator info
+Update N/A root N/A
+└─IndexJoin 12.50 root inner join, inner:IndexReader, outer key:executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t2.txn_accno, inner key:executor__index_advise.t1.pnbrn_cnaps, executor__index_advise.t1.new_accno, equal cond:eq(executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t1.pnbrn_cnaps), eq(executor__index_advise.t2.txn_accno, executor__index_advise.t1.new_accno)
+  ├─TableReader(Build) 10.00 root data:Selection
+  │ └─Selection 10.00 cop[tikv] eq(executor__index_advise.t2.txn_dt, 2022-12-01)
+  │   └─TableFullScan 10000.00 cop[tikv] table:b keep order:false, stats:pseudo
+  └─IndexReader(Probe) 10.00 root index:Selection
+    └─Selection 10.00 cop[tikv] eq(executor__index_advise.t1.pnbrn_cnaps, "40001")
+      └─IndexRangeScan 10.00 cop[tikv] table:t1, index:PRIMARY(pnbrn_cnaps, new_accno) range: decided by [eq(executor__index_advise.t1.pnbrn_cnaps, executor__index_advise.t2.pnbrn_cnaps) eq(executor__index_advise.t1.new_accno, executor__index_advise.t2.txn_accno)], keep order:false, stats:pseudo
+set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF';
+explain format='brief' update
+/*+ inl_join(a) */
+t2 b,
+(
+select t1.pnbrn_cnaps,
+t1.new_accno
+from t1
+where t1.pnbrn_cnaps = '40001'
+) a
+set b.yn_frz = '1'
+where b.txn_dt = str_to_date('20221201', '%Y%m%d')
+and b.pnbrn_cnaps = a.pnbrn_cnaps
+and b.txn_accno = a.new_accno;
+id estRows task access object operator info
+Update N/A root N/A
+└─HashJoin 12.50 root inner join, equal:[eq(executor__index_advise.t2.pnbrn_cnaps, executor__index_advise.t1.pnbrn_cnaps) eq(executor__index_advise.t2.txn_accno, executor__index_advise.t1.new_accno)]
+  ├─IndexReader(Build) 10.00 root index:IndexRangeScan
+  │ └─IndexRangeScan 10.00 cop[tikv] table:t1, index:PRIMARY(pnbrn_cnaps, new_accno) range:["40001","40001"], keep order:false, stats:pseudo
+  └─TableReader(Probe) 10.00 root data:Selection
+    └─Selection 10.00 cop[tikv] eq(executor__index_advise.t2.txn_dt, 2022-12-01)
+      └─TableFullScan 10000.00 cop[tikv] table:b keep order:false, stats:pseudo
+set @@session.tidb_enable_inl_join_inner_multi_pattern='ON';
+update
+/*+ inl_join(a) */
+t2 b,
+(
+select t1.pnbrn_cnaps,
+t1.new_accno
+from t1
+where t1.pnbrn_cnaps = '40001'
+) a
+set b.yn_frz = '1'
+where b.txn_dt = str_to_date('20221201', '%Y%m%d')
+and b.pnbrn_cnaps = a.pnbrn_cnaps
+and b.txn_accno = a.new_accno;
+select yn_frz from t2;
+yn_frz
+1
+set @@session.tidb_opt_advanced_join_hint=default;
+set @@session.tidb_enable_inl_join_inner_multi_pattern=default;
+set @@tidb_opt_advanced_join_hint=0;
+drop table if exists tbl_miss, tbl_src;
+create table tbl_miss(
+id bigint(20) unsigned not null,
+txn_dt date default null,
+perip_sys_uuid varchar(32) not null,
+rvrs_idr varchar(1) not null,
+primary key(id) clustered,
+key idx1 (txn_dt, perip_sys_uuid, rvrs_idr)
+);
+insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1");
+create table tbl_src(
+txn_dt date default null,
+uuid varchar(32) not null,
+rvrs_idr char(1),
+expd_inf varchar(5000),
+primary key(uuid,rvrs_idr) nonclustered
+);
+insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1");
+set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF';
+explain format='brief' select /*+ use_index(mis,) inl_join(src) */
+*
+from tbl_miss mis
+,tbl_src src
+where src.txn_dt >= str_to_date('20221201', '%Y%m%d')
+and mis.id between 1 and 10000
+and mis.perip_sys_uuid = src.uuid
+and mis.rvrs_idr = src.rvrs_idr
+and mis.txn_dt = src.txn_dt
+and (
+case when isnull(src.expd_inf) = 1 then ''
+else
+substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4,
+instr(substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1)
+end
+) != '01';
+id estRows task access object operator info
+HashJoin 104.17 root inner join, equal:[eq(executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_src.uuid) eq(executor__index_advise.tbl_miss.rvrs_idr, executor__index_advise.tbl_src.rvrs_idr) eq(executor__index_advise.tbl_miss.txn_dt, executor__index_advise.tbl_src.txn_dt)]
+├─TableReader(Build) 83.33 root data:Selection
+│ └─Selection 83.33 cop[tikv] ge(executor__index_advise.tbl_miss.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_miss.txn_dt))
+│   └─TableRangeScan 250.00 cop[tikv] table:mis range:[1,10000], keep order:false, stats:pseudo
+└─Selection(Probe) 2666.67 root ne(case(eq(isnull(executor__index_advise.tbl_src.expd_inf), 1), "", substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), minus(instr(substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), length(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"))), "~~"), 1))), "01")
+  └─TableReader 3333.33 root data:Selection
+    └─Selection 3333.33 cop[tikv] ge(executor__index_advise.tbl_src.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_src.txn_dt))
+      └─TableFullScan 10000.00 cop[tikv] table:src keep order:false, stats:pseudo
+set @@session.tidb_enable_inl_join_inner_multi_pattern='ON';
+explain format='brief' select /*+ use_index(mis,) inl_join(src) */
+*
+from tbl_miss mis
+,tbl_src src
+where src.txn_dt >= str_to_date('20221201', '%Y%m%d')
+and mis.id between 1 and 10000
+and mis.perip_sys_uuid = src.uuid
+and mis.rvrs_idr = src.rvrs_idr
+and mis.txn_dt = src.txn_dt
+and (
+case when isnull(src.expd_inf) = 1 then ''
+else
+substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4,
+instr(substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1)
+end
+) != '01';
+id estRows task access object operator info
+IndexJoin 104.17 root inner join, inner:Selection, outer key:executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_miss.rvrs_idr, inner key:executor__index_advise.tbl_src.uuid, executor__index_advise.tbl_src.rvrs_idr, equal cond:eq(executor__index_advise.tbl_miss.perip_sys_uuid, executor__index_advise.tbl_src.uuid), eq(executor__index_advise.tbl_miss.rvrs_idr, executor__index_advise.tbl_src.rvrs_idr), eq(executor__index_advise.tbl_miss.txn_dt, executor__index_advise.tbl_src.txn_dt)
+├─TableReader(Build) 83.33 root data:Selection
+│ └─Selection 83.33 cop[tikv] ge(executor__index_advise.tbl_miss.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_miss.txn_dt))
+│   └─TableRangeScan 250.00 cop[tikv] table:mis range:[1,10000], keep order:false, stats:pseudo
+└─Selection(Probe) 222222.22 root ne(case(eq(isnull(executor__index_advise.tbl_src.expd_inf), 1), "", substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), minus(instr(substr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), plus(instr(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"), "~~a4"), 4), length(concat_ws("", executor__index_advise.tbl_src.expd_inf, "~~"))), "~~"), 1))), "01")
+  └─IndexLookUp 83.33 root
+    ├─IndexRangeScan(Build) 83.33 cop[tikv] table:src, index:PRIMARY(uuid, rvrs_idr) range: decided by [eq(executor__index_advise.tbl_src.uuid, executor__index_advise.tbl_miss.perip_sys_uuid) eq(executor__index_advise.tbl_src.rvrs_idr, executor__index_advise.tbl_miss.rvrs_idr)], keep order:false, stats:pseudo
+    └─Selection(Probe) 83.33 cop[tikv] ge(executor__index_advise.tbl_src.txn_dt, 2022-12-01), not(isnull(executor__index_advise.tbl_src.txn_dt))
+      └─TableRowIDScan 83.33 cop[tikv] table:src keep order:false, stats:pseudo
+select /*+ use_index(mis,) inl_join(src) */
+*
+from tbl_miss mis
+,tbl_src src
+where src.txn_dt >= str_to_date('20221201', '%Y%m%d')
+and mis.id between 1 and 10000
+and mis.perip_sys_uuid = src.uuid
+and mis.rvrs_idr = src.rvrs_idr
+and mis.txn_dt = src.txn_dt
+and (
+case when isnull(src.expd_inf) = 1 then ''
+else
+substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4,
+instr(substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1)
+end
+) != '01';
+id txn_dt perip_sys_uuid rvrs_idr txn_dt uuid rvrs_idr expd_inf
+1 2022-12-01 123 1 2022-12-01 123 1 NULL
+set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF';
+select /*+ use_index(mis,) inl_join(src) */
+*
+from tbl_miss mis
+,tbl_src src
+where src.txn_dt >= str_to_date('20221201', '%Y%m%d')
+and mis.id between 1 and 10000
+and mis.perip_sys_uuid = src.uuid
+and mis.rvrs_idr = src.rvrs_idr
+and mis.txn_dt = src.txn_dt
+and (
+case when isnull(src.expd_inf) = 1 then ''
+else
+substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4,
+instr(substr(concat_ws('',src.expd_inf,'~~'),
+instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1)
+end
+) != '01';
+id txn_dt perip_sys_uuid rvrs_idr txn_dt uuid rvrs_idr expd_inf
+1 2022-12-01 123 1 2022-12-01 123 1 NULL
+set @@tidb_opt_advanced_join_hint=default;
+set @@session.tidb_enable_inl_join_inner_multi_pattern=default;
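The index_advise.result pair above isolates tidb_enable_inl_join_inner_multi_pattern: with it ON, the inl_join hint can produce an IndexJoin even when the inner side is a derived table or carries extra operators (Selection, Projection) above the index scan; with it OFF, the same query falls back to a HashJoin. Reduced to a skeleton with hypothetical tables big and small, which are assumptions, not from the recorded suite:

-- illustrative skeleton only; big, small, k, and flag are assumptions
set @@session.tidb_enable_inl_join_inner_multi_pattern='ON';
explain format='brief' select /*+ inl_join(d) */ *
from big b, (select k from small where flag = 1) d
where b.k = d.k;
-- expect an IndexJoin here; with the variable OFF the plan degrades to a HashJoin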
2 +3 3 +4 4 +5 5 +commit; +drop table if exists t1, t2; +create table t1 (id bigint(20) unsigned, primary key(id)); +create table t2 (id bigint(20) unsigned); +insert into t1 values (8738875760185212610); +insert into t1 values (9814441339970117597); +insert into t2 values (8738875760185212610); +insert into t2 values (9814441339970117597); +select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id; +id id +8738875760185212610 8738875760185212610 +9814441339970117597 9814441339970117597 +set @@tidb_opt_advanced_join_hint=0; +set @@tidb_partition_prune_mode= 'static'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +HashJoin 1.25 root inner join, equal:[eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int)], other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2, partition:p0 keep order:false +└─PartitionUnion(Probe) 9991.00 root + ├─TableReader 1.00 root data:Selection + │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false + └─TableReader 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +show warnings; +Level Code Message +Warning 1815 Optimizer Hint /*+ INL_MERGE_JOIN(t1, t2) */ is inapplicable +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexHashJoin 1.25 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t1.c_int, inner key:executor__index_lookup_merge_join.t2.c_int, equal cond:eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─PartitionUnion(Build) 9991.00 root +│ ├─TableReader 1.00 root data:Selection +│ │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false +│ └─TableReader 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ └─TableFullScan 
10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +└─TableReader(Probe) 1.25 root data:Selection + └─Selection 1.25 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) + └─TableRangeScan 1.25 cop[tikv] table:t2, partition:p0 range: decided by [executor__index_lookup_merge_join.t1.c_int], keep order:false +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexJoin 1.25 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t1.c_int, inner key:executor__index_lookup_merge_join.t2.c_int, equal cond:eq(executor__index_lookup_merge_join.t1.c_int, executor__index_lookup_merge_join.t2.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─PartitionUnion(Build) 9991.00 root +│ ├─TableReader 1.00 root data:Selection +│ │ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ │ └─TableFullScan 1.00 cop[tikv] table:t1, partition:p0 keep order:false +│ └─TableReader 9990.00 root data:Selection +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) +│ └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo +└─TableReader(Probe) 1.25 root data:Selection + └─Selection 1.25 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) + └─TableRangeScan 1.25 cop[tikv] table:t2, partition:p0 range: decided by [executor__index_lookup_merge_join.t1.c_int], keep order:false +set @@tidb_partition_prune_mode= 'dynamic'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +MergeJoin 1.00 root inner join, left key:executor__index_lookup_merge_join.t1.c_int, right key:executor__index_lookup_merge_join.t2.c_int, other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:true +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableFullScan 1.00 cop[tikv] table:t1 keep order:true +show warnings; +Level Code Message +Warning 1815 Optimizer Hint /*+ INL_MERGE_JOIN(t1, t2) */ is inapplicable +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 
Alice 1 Bob +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexHashJoin 1.00 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t2.c_int, inner key:executor__index_lookup_merge_join.t1.c_int, equal cond:eq(executor__index_lookup_merge_join.t2.c_int, executor__index_lookup_merge_join.t1.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:false +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [executor__index_lookup_merge_join.t2.c_int], keep order:false +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +c_int c_str c_int c_str +1 Alice 1 Bob +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +id estRows task access object operator info +IndexJoin 1.00 root inner join, inner:TableReader, outer key:executor__index_lookup_merge_join.t2.c_int, inner key:executor__index_lookup_merge_join.t1.c_int, equal cond:eq(executor__index_lookup_merge_join.t2.c_int, executor__index_lookup_merge_join.t1.c_int), other cond:lt(executor__index_lookup_merge_join.t1.c_str, executor__index_lookup_merge_join.t2.c_str) +├─TableReader(Build) 1.00 root partition:p0 data:Selection +│ └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t2.c_str)) +│ └─TableFullScan 1.00 cop[tikv] table:t2 keep order:false +└─TableReader(Probe) 1.00 root partition:all data:Selection + └─Selection 1.00 cop[tikv] not(isnull(executor__index_lookup_merge_join.t1.c_str)) + └─TableRangeScan 1.00 cop[tikv] table:t1 range: decided by [executor__index_lookup_merge_join.t2.c_int], keep order:false +set @@tidb_opt_advanced_join_hint=DEFAULT; +set @@tidb_partition_prune_mode= DEFAULT; +drop table if exists t, s; +create table s(a int, index(a)); +create table t(a int); +insert into t values(1); +select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +a a +1 NULL +select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +a a +1 NULL +drop table if exists t1, t2; +CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`)); +INSERT INTO `t1` VALUES (1,NULL); +CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`)); +SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +1 +1 +SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +1 +1 +drop table if exists x; +CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +drop table if exists x; +CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` 
(`a`)); +insert into x values("x"),("x"),("b"),("y"); +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +a a +b b +x x +x x +x x +x x +y y diff --git a/tests/integrationtest/r/executor/infoschema_reader.result b/tests/integrationtest/r/executor/infoschema_reader.result new file mode 100644 index 0000000000000..15ab1dfb7ce46 --- /dev/null +++ b/tests/integrationtest/r/executor/infoschema_reader.result @@ -0,0 +1,285 @@ +select * from information_schema.profiling; +QUERY_ID SEQ STATE DURATION CPU_USER CPU_SYSTEM CONTEXT_VOLUNTARY CONTEXT_INVOLUNTARY BLOCK_OPS_IN BLOCK_OPS_OUT MESSAGES_SENT MESSAGES_RECEIVED PAGE_FAULTS_MAJOR PAGE_FAULTS_MINOR SWAPS SOURCE_FUNCTION SOURCE_FILE SOURCE_LINE +set @@profiling=1; +select * from information_schema.profiling; +QUERY_ID SEQ STATE DURATION CPU_USER CPU_SYSTEM CONTEXT_VOLUNTARY CONTEXT_INVOLUNTARY BLOCK_OPS_IN BLOCK_OPS_OUT MESSAGES_SENT MESSAGES_RECEIVED PAGE_FAULTS_MAJOR PAGE_FAULTS_MINOR SWAPS SOURCE_FUNCTION SOURCE_FILE SOURCE_LINE +0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +select * from information_schema.SCHEMATA where schema_name='mysql'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def mysql utf8mb4 utf8mb4_bin NULL NULL +drop user if exists schemata_tester; +create user schemata_tester; +select count(*) from information_schema.SCHEMATA; +count(*) +1 +select * from information_schema.SCHEMATA where schema_name='mysql'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA'; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin NULL NULL +CREATE ROLE r_mysql_priv; +GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv; +GRANT r_mysql_priv TO schemata_tester; +set role r_mysql_priv; +select count(*) from information_schema.SCHEMATA; +count(*) +2 +select * from information_schema.SCHEMATA; +CATALOG_NAME SCHEMA_NAME DEFAULT_CHARACTER_SET_NAME DEFAULT_COLLATION_NAME SQL_PATH TIDB_PLACEMENT_POLICY_NAME +def INFORMATION_SCHEMA utf8mb4 utf8mb4_bin NULL NULL +def mysql utf8mb4 utf8mb4_bin NULL NULL +drop table if exists executor__infoschema_reader.t; +create table executor__infoschema_reader.t (a int, b int, primary key(a), key k1(b)); +select index_id from information_schema.tidb_indexes where table_schema = 'executor__infoschema_reader' and table_name = 't'; +index_id +0 +1 +select tidb_table_id > 0 from information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't'; +tidb_table_id > 0 +1 +drop database if exists `foo`; +CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'; +select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'; +default_character_set_name default_collation_name +utf8mb4 utf8mb4_bin +drop database `foo`; +drop view if exists executor__infoschema_reader.v1; +CREATE DEFINER='root'@'localhost' VIEW executor__infoschema_reader.v1 AS SELECT 1; +select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; +TABLE_COLLATION is null +1 +1 +SELECT * FROM information_schema.views WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +TABLE_CATALOG TABLE_SCHEMA 
TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE CHARACTER_SET_CLIENT COLLATION_CONNECTION +def executor__infoschema_reader v1 SELECT 1 AS `1` CASCADED NO root@localhost DEFINER utf8mb4 utf8mb4_general_ci +SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +table_catalog table_schema table_name table_type engine version row_format table_rows avg_row_length data_length max_data_length index_length data_free auto_increment update_time check_time table_collation checksum create_options table_comment +def executor__infoschema_reader v1 VIEW NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL VIEW +drop table if exists t; +create table t (bit bit(10) DEFAULT b'100'); +SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'executor__infoschema_reader' AND TABLE_NAME = 't'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT GENERATION_EXPRESSION +def executor__infoschema_reader t bit 1 b'100' YES bit NULL NULL 10 0 NULL NULL NULL bit(10) select,insert,update,references +drop table if exists t; +set time_zone='+08:00'; +drop table if exists t; +create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000'); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +1970-01-01 08:00:01.000 +set time_zone='+04:00'; +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +1970-01-01 04:00:01.000 +set time_zone=default; +drop table if exists t; +create table t (a bit DEFAULT (rand())); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +column_default +rand() +drop table if exists t; +CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ; +select column_type from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 't'; +column_type +bit(1) +year(4) +select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'; +ordinal_position +2 +select * from information_schema.ENGINES; +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +InnoDB DEFAULT Supports transactions, row-level locking, and foreign keys YES YES YES +drop table if exists t; +create table t (a varchar(255) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 255 +drop table t; +create table t (a varchar(255) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 1020 +drop table t; +create table t (a varchar(255) collate utf8_bin); +select 
character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +255 765 +drop table t; +create table t (a char(10) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +10 10 +drop table t; +create table t (a char(10) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +10 40 +drop table t; +create table t (a set('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +8 8 +drop table t; +create table t (a set('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +8 32 +drop table t; +create table t (a enum('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +4 4 +drop table t; +create table t (a enum('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +character_maximum_length character_octet_length +4 16 +drop table t; +set global tidb_ddl_enable_fast_reorg = false; +drop database if exists test_ddl_jobs; +create database test_ddl_jobs; +select db_name, job_type from information_schema.DDL_JOBS limit 1; +db_name job_type +test_ddl_jobs create schema +use test_ddl_jobs; +create table t (a int); +select db_name, table_name, job_type from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and table_name = 't'; +db_name table_name job_type +test_ddl_jobs t create table +select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'; +job_type +create table +select distinct job_type from information_schema.DDL_JOBS where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s'); +job_type +create table +drop user if exists DDL_JOBS_tester; +create user DDL_JOBS_tester; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; +DB_NAME TABLE_NAME +CREATE ROLE r_priv; +GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv; +GRANT r_priv TO DDL_JOBS_tester; +set role r_priv; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; +DB_NAME TABLE_NAME +test_ddl_jobs t +create table tt (a int); +alter table tt add index t(a), add column b int; +select db_name, table_name, job_type from information_schema.DDL_JOBS limit 3; +db_name table_name job_type +test_ddl_jobs tt alter table multi-schema change +test_ddl_jobs tt add column /* subjob */ +test_ddl_jobs tt add index /* subjob */ /* txn */ +drop database test_ddl_jobs; +use executor__infoschema_reader; +set global tidb_ddl_enable_fast_reorg = default; 
+select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +def mysql tbl def mysql stats_meta table_id 1 NULL NULL NULL NULL +create user key_column_tester; +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION POSITION_IN_UNIQUE_CONSTRAINT REFERENCED_TABLE_SCHEMA REFERENCED_TABLE_NAME REFERENCED_COLUMN_NAME +CREATE ROLE r_stats_meta ; +GRANT ALL PRIVILEGES ON mysql.stats_meta TO r_stats_meta; +GRANT r_stats_meta TO key_column_tester; +set role r_stats_meta; +select count(*)>0 from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta'; +count(*)>0 +1 +drop table if exists e, e2; +CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE (id) ( +PARTITION p0 VALUES LESS THAN (50), +PARTITION p1 VALUES LESS THAN (100), +PARTITION p2 VALUES LESS THAN (150), +PARTITION p3 VALUES LESS THAN (MAXVALUE)); +CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)); +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e' and table_schema=(select database()); +PARTITION_NAME TABLE_ROWS +p0 0 +p1 0 +p2 0 +p3 0 +INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black"); +set tidb_enable_exchange_partition='on'; +ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2; +INSERT INTO e VALUES (41, "Michael", "Green"); +analyze table e; +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e'; +PARTITION_NAME TABLE_ROWS +p0 1 +p1 0 +p2 0 +p3 3 +set tidb_enable_exchange_partition=default; +select count(*) > 0 from information_schema.`METRICS_TABLES`; +count(*) > 0 +1 +select * from information_schema.`METRICS_TABLES` where table_name='tidb_qps'; +TABLE_NAME PROMQL LABELS QUANTILE COMMENT +tidb_qps sum(rate(tidb_server_query_total{$LABEL_CONDITIONS}[$RANGE_DURATION])) by (result,type,instance) instance,type,result 0 TiDB query processing numbers per second +select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range'; +CONSTRAINT_CATALOG CONSTRAINT_SCHEMA CONSTRAINT_NAME TABLE_SCHEMA TABLE_NAME CONSTRAINT_TYPE +def mysql delete_range_index mysql gc_delete_range UNIQUE +select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit'; +VARIABLE_NAME VARIABLE_VALUE +tidb_retry_limit 10 +drop sequence if exists seq, seq2; +CREATE SEQUENCE seq maxvalue 10000000; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq 1 1000 0 1 10000000 1 1 +DROP SEQUENCE seq; +CREATE SEQUENCE seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq 1 10 0 1 10 -1 -1 +CREATE SEQUENCE seq2 
start = -9 minvalue -10 maxvalue 10 increment -1 cache 15; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq2'; +TABLE_CATALOG SEQUENCE_SCHEMA SEQUENCE_NAME CACHE CACHE_VALUE CYCLE INCREMENT MAX_VALUE MIN_VALUE START COMMENT +def executor__infoschema_reader seq2 1 15 0 -1 10 -10 -9 +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2' and table_schema='executor__infoschema_reader'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME TABLE_TYPE ENGINE TABLE_ROWS +def executor__infoschema_reader seq2 SEQUENCE InnoDB 1 +drop table if exists t_int, t_implicit, t_common; +create table t_int (a int primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_int'; +TIDB_PK_TYPE +CLUSTERED +set tidb_enable_clustered_index=int_only; +create table t_implicit (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_implicit'; +TIDB_PK_TYPE +NONCLUSTERED +set tidb_enable_clustered_index=on; +create table t_common (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_common'; +TIDB_PK_TYPE +CLUSTERED +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'; +TIDB_PK_TYPE +NONCLUSTERED +set tidb_enable_clustered_index=default; +drop table if exists t; +CREATE TABLE t ( id int DEFAULT NULL); +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`; +select * from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 'v_test'; +TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT GENERATION_EXPRESSION +def executor__infoschema_reader v_test type 1 NULL YES binary 0 0 NULL NULL NULL NULL NULL binary(0) select,insert,update,references +drop user if exists usageuser; +create user usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def USAGE NO +GRANT SELECT ON *.* to usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def SELECT NO +GRANT SELECT ON *.* to usageuser WITH GRANT OPTION; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def SELECT YES +GRANT BACKUP_ADMIN ON *.* to usageuser; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type; +GRANTEE TABLE_CATALOG PRIVILEGE_TYPE IS_GRANTABLE +'usageuser'@'%' def BACKUP_ADMIN NO +'usageuser'@'%' def SELECT YES diff --git a/tests/integrationtest/r/executor/insert.result b/tests/integrationtest/r/executor/insert.result new file mode 100644 index 0000000000000..b45d43d6129da --- /dev/null +++ 
b/tests/integrationtest/r/executor/insert.result @@ -0,0 +1,1217 @@ +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +insert into t values('aa', 2); +Error 1062 (23000): Duplicate entry 'aa' for key 't.PRIMARY' +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +insert into t values ('a', 'b', 'c'); +Error 1062 (23000): Duplicate entry 'a-b-c' for key 't.PRIMARY' +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +c1 +1.0000 +set tidb_enable_clustered_index = default; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create 
table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; +Error 1062 (23000): Duplicate entry '1-2-4' for key 'c.PRIMARY' +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +drop table if exists t1; +create table t1(a bigint); +insert into t1 values("asfasdfsajhlkhlksdaf"); +Error 1366 (HY000): Incorrect bigint value: 'asfasdfsajhlkhlksdaf' for column 'a' at row 1 +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +insert into t1 values('我'); +Error 1366 (HY000): Incorrect string value '\xE6\x88\x91' for column 'a' +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +a b +我 ? 
+drop table if exists t; +create table t (a year); +insert into t values(2156); +Error 1264 (22003): Out of range value for column 'a' at row 1 +DROP TABLE IF EXISTS ts; +CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +Level Code Message +Warning 1292 Incorrect timestamp value: '1018-12-23 00:00:00' for column 'time1' at row 1 +SELECT * FROM ts ORDER BY id; +id time1 +1 0000-00-00 00:00:00 +SET @@sql_mode='STRICT_TRANS_TABLES'; +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +Error 1292 (22007): Incorrect timestamp value: '1018-12-24 00:00:00' for column 'time1' at row 1 +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +Level Code Message +Warning 1366 Incorrect smallint value: '*' for column 'c0' at row 1 +Warning 1690 constant 32768 overflows smallint +Warning 1467 Failed to read auto-increment value from storage engine +SET @@sql_mode=default; +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +a +1111111111111.01 +select cast(a as decimal) from t1; +cast(a as decimal) +9999999999 +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2, 222]', 2); +Error 1062 (23000): Duplicate entry '2' for key 't1.idx' +replace into t1 values ('[1, 10]', 10); +select * from t1; +a b +[2, 22] 2 +[1, 10] 10 +replace into t1 values ('[1, 2]', 1); +select * from t1; +a b +[1, 2] 1 +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +a b +[1, 11] 1 +[2, 22] 2 +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +a b +[1, 11] 1 +[2, 22] 10 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; +Error 1062 (23000): Duplicate entry '[1, 2]' for key 't1.idx' +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +id c1 +1 1970-01-01 09:20:34 +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +dt +2020-10-23 10:31:15 +delete from t; +insert into t values ('2020.10-22'); 
+select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +dt +2020-10-22 10:31:15 +delete from t; +insert into t values ('2020-10:22'); +select * from t; +dt +2020-10-22 00:00:00 +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +dt +2020-10-22 16:00:00 +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +dt +2020-10-22 16:31:00 +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +dt +2020-10-22 16:31:15 +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +dt +2020-10-22 15:01:15 +drop table if exists t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +dt ts +2020-10-23 00:53:40 2020-10-22 16:53:40 +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 08:53:40 2020-10-23 00:53:40 +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 10:53:40 2020-10-22 21:53:40 +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +dt ts +2020-10-22 16:53:40 2020-10-22 16:53:40 +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +count(*) +2 +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +dt ts +2020-10-27 20:39:10.3 2020-10-27 20:39:10.3 +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +dt ts +2020-10-28 00:39:10.300000 2020-10-28 00:39:10.300000 +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +dt ts +2020-10-27 22:39:10.10 2020-10-27 22:39:10.10 +set time_zone=default; +set timestamp=default; 
+drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +a +0000 +0000 +0000 +2000 +2000 +2000 +1979 +1979 +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +f_year +0000 +insert into t values('0000'); +select * from t; +f_year +0000 +0000 +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +truncate t1; +truncate t2; +truncate t3; +truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +select year(d), month(d), day(d) from t2; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; +year(d) month(d) day(d) +0 0 0 +2019 0 0 +2019 1 0 +2019 0 1 +2019 2 31 +set sql_mode=default; +drop table if exists t1, t2, t3; +create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000)); +insert into t1 set a=1, b=1; +insert into t1 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t1; +a b +1 1 +create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4; +insert into t2 set a=1,b=1; +insert into t2 set a=1,b=1 on duplicate key update a=1,b=1; +select * from t2; +a b +1 1 +CREATE TABLE t3 (a int, b int, c int, d int, e int, +PRIMARY KEY (a,b), +UNIQUE KEY (b,c,d) +) PARTITION BY RANGE ( b ) ( +PARTITION p0 VALUES LESS THAN (4), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (11) +); +insert into t3 values (1,2,3,4,5); +insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e); +select * from t3; +a b c d e +1 2 3 4 16 +drop table if exists t1; +create 
table t1 (a bit(3)); +insert into t1 values(-1); +Error 1406 (22001): Data too long for column 'a' at row 1 +insert into t1 values(9); +Error 1406 (22001): Data too long for column 'a' at row 1 +create table t64 (a bit(64)); +insert into t64 values(-1); +insert into t64 values(18446744073709551615); +insert into t64 values(18446744073709551616); +Error 1264 (22003): Out of range value for column 'a' at row 1 +drop table if exists bug; +create table bug (a varchar(100)); +insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no; +select * from bug; +a +20180531557 +20190430140319679394 +drop table if exists t; +create table t (a int, b double); +insert into t values (ifnull('',0)+0, 0); +insert into t values (0, ifnull('',0)+0); +select * from t; +a b +0 0 +0 0 +insert into t values ('', 0); +Error 1366 (HY000): Incorrect int value: '' for column 'a' at row 1 +insert into t values (0, ''); +Error 1366 (HY000): Incorrect double value: '' for column 'b' at row 1 +update t set a = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set b = ''; +Error 1292 (22007): Truncated incorrect DOUBLE value: '' +update t set a = ifnull('',0)+0; +update t set b = ifnull('',0)+0; +delete from t where a = ''; +select * from t; +a b +drop table if exists t,t1; +create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2)); +insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col1' at row 1 +insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99); +Error 1264 (22003): Out of range value for column 'col2' at row 1 +create table t1(id1 float,id2 float); +insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999); +select @@warning_count; +@@warning_count +2 +select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1; +convert(id1,decimal(65)) convert(id2,decimal(65)) +340282346638528860000000000000000000000 -340282346638528860000000000000000000000 +set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +drop table if exists t1; +CREATE TABLE t1(c1 mediumtext); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215)); +Error 1406 (22001): Data too long for column 'c1' at row 1 +set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION'; +drop table if exists t1; +CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128)); +select length(c1) from t1; +length(c1) +254 +drop table if exists t1; +CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4); +INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768)); +select length(c1) from t1; +length(c1) +65534 +set sql_mode = 
default; +set @@allow_auto_random_explicit_insert = true; +drop table if exists ar; +create table ar (id bigint key clustered auto_random, name char(10)); +insert into ar(id) values (1); +select id from ar; +id +1 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +insert into ar(id) values (1), (2); +select id from ar; +id +1 +2 +select last_insert_id(); +last_insert_id() +0 +delete from ar; +drop table ar; +set @@allow_auto_random_explicit_insert = default; +drop table if exists t, t1; +create table t (a int primary key, b datetime, d date); +insert into t values (1, '2019-02-11 30:00:00', '2019-01-31'); +Error 1292 (22007): Incorrect datetime value: '2019-02-11 30:00:00' for column 'b' at row 1 +CREATE TABLE t1 (a BINARY(16) PRIMARY KEY); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +INSERT INTO t1 VALUES (AES_ENCRYPT('a','a')); +Error 1062 (23000): Duplicate entry '{ W]\xA1\x06u\x9D\xBD\xB1\xA3.\xE2\xD9\xA7t' for key 't1.PRIMARY' +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +INSERT INTO t1 VALUES (AES_ENCRYPT('b','b')); +Error 1062 (23000): Duplicate entry '\x0C\x1E\x8DG`\xEB\x93 F&BC\xF0\xB5\xF4\xB7' for key 't1.PRIMARY' +drop table if exists t1; +create table t1 (a bit primary key) engine=innodb; +insert into t1 values (b'0'); +insert into t1 values (b'0'); +Error 1062 (23000): Duplicate entry '\x00' for key 't1.PRIMARY' +drop table if exists t; +create table t(c numeric primary key); +insert ignore into t values(null); +insert into t values(0); +Error 1062 (23000): Duplicate entry '0' for key 't.PRIMARY' +set tidb_enable_clustered_index = on; +drop table if exists t1pk; +create table t1pk(id varchar(200) primary key, v int); +insert into t1pk(id, v) values('abc', 1); +select * from t1pk; +id v +abc 1 +set @@tidb_constraint_check_in_place=true; +insert into t1pk(id, v) values('abc', 2); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t1pk(id, v) values('abc', 3); +Error 1062 (23000): Duplicate entry 'abc' for key 't1pk.PRIMARY' +select v, id from t1pk; +v id +1 abc +select id from t1pk where id = 'abc'; +id +abc +select v, id from t1pk where id = 'abc'; +v id +1 abc +drop table if exists t3pk; +create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1); +select * from t3pk; +id1 id2 v id3 +abc xyz 1 100 +set @@tidb_constraint_check_in_place=true; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +set @@tidb_constraint_check_in_place=false; +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3); +Error 1062 (23000): Duplicate entry 'abc-xyz-100' for key 't3pk.PRIMARY' +select v, id3, id2, id1 from t3pk; +v id3 id2 id1 +1 100 xyz abc +select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 +100 xyz abc +select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc'; +id3 id2 id1 v +100 xyz abc 1 +insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1); +insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1); +drop table if exists t1pku; +create table t1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +id uk v +abc 1 2 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +Error 1062 (23000): Duplicate entry '1' for 
key 't1pku.ukk' +select * from t1pku; +id uk v +abc 1 2 +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +id1 id2 v id3 +abc xyz 1 100 +abc xyz 1 101 +abc zzz 1 101 +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +id v +abc 1 +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +id1 id2 v +abc cba 1 +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +id uk v +abc 1 2 +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +id uk v +abc 1 2 +bbb 2 1 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +id v +abb 2 +acc 2 +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +id v +acc 2 +xxx 3 +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +id uk v +abb 1 11 +acc 2 20 +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +id uk v +acc 2 20 +xxx 1 12 +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 1 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:11 2018-01-01 11:11:11 2 +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +id1 id2 v +2018-01-01 11:11:12 2018-01-01 11:11:11 2 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +b +1 +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +b +1 +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c 
int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +a +select b from issue_18232 use index (idx); +b +select a,b from issue_18232 use index (idx); +a b +select c from issue_18232 use index (idx); +c +select a,c from issue_18232 use index (idx); +a c +select b,c from issue_18232 use index (idx); +b c +select a,b,c from issue_18232 use index (idx); +a b c +select d from issue_18232 use index (idx); +d +select a,d from issue_18232 use index (idx); +a d +select b,d from issue_18232 use index (idx); +b d +select a,b,d from issue_18232 use index (idx); +a b d +select c,d from issue_18232 use index (idx); +c d +select a,c,d from issue_18232 use index (idx); +a c d +select b,c,d from issue_18232 use index (idx); +b c d +select a,b,c,d from issue_18232 use index (idx); +a b c d +set tidb_enable_clustered_index = default; +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +a a +0000 0 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +v c +ab ab +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +Level Code Message +Warning 1265 Data truncated for column 'v' at row 1 +Warning 1265 Data truncated for column 'v' at row 2 +Warning 1265 Data truncated for column 'v' at row 3 +Warning 1265 Data truncated for column 'v' at row 4 +select * from vctt; +v c +ab + + ab + + +ab ab +ab ab +ab ab +select length(v), length(c) from vctt; +length(v) length(c) +4 4 +4 4 +4 2 +4 4 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +insert into t1 values(1,'aaaaa'); +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'aaa'; +Error 1062 (23000): Duplicate entry '1-aaa' for key 't1.PRIMARY' +insert into t1 select 1, 'bb'; +insert into t1 select 1, 'bb'; +Error 1062 (23000): Duplicate entry '1-bb' for key 't1.PRIMARY' +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; +h +a +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; +insert into temp_test(id) values(0); +select * from temp_test; +id +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +commit; +begin; +insert into temp_test(id) values(0); +select * from temp_test; +id +1 +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +insert into 
temp_test(id) values(0), (0); +select id from temp_test order by id; +id +1 +2 +3 +4 +commit; +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +id +10 +11 +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +id +10 +11 +20 +30 +31 +32 +commit; +drop table if exists temp_test; +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +commit; +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +_tidb_rowid +1 +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +commit; +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +_tidb_rowid +1 +2 +3 +4 +commit; +drop table if exists temp_test; +drop table if exists t1; +create table t1(c1 date); +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +c1 +2020-02-31 +set @@sql_mode='STRICT_TRANS_TABLES'; +insert into t1 values('2020-02-31'); +Error 1292 (22007): Incorrect date value: '2020-02-31' for column 'c1' at row 1 +set sql_mode=default; +drop table if exists t; +create table t (id decimal(10)); +insert into t values('1sdf'); +Error 1366 (HY000): Incorrect decimal value: '1sdf' for column 'id' at row 1 +insert into t values('1edf'); +Error 1366 (HY000): Incorrect decimal value: '1edf' for column 'id' at row 1 +insert into t values('12Ea'); +Error 1366 (HY000): Incorrect decimal value: '12Ea' for column 'id' at row 1 +insert into t values('1E'); +Error 1366 (HY000): Incorrect decimal value: '1E' for column 'id' at row 1 +insert into t values('1e'); +Error 1366 (HY000): Incorrect decimal value: '1e' for column 'id' at row 1 +insert into t values('1.2A'); +Error 1366 (HY000): Incorrect decimal value: '1.2A' for column 'id' at row 1 +insert into t values('1.2.3.4.5'); +Error 1366 (HY000): Incorrect decimal value: '1.2.3.4.5' for column 'id' at row 1 +insert into t values('1.2.'); +Error 1366 (HY000): Incorrect decimal value: '1.2.' 
for column 'id' at row 1 +insert into t values('1,999.00'); +Error 1366 (HY000): Incorrect decimal value: '1,999.00' for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +Level Code Message +Warning 1366 Incorrect decimal value: '12e-3' for column 'id' at row 1 +select id from t; +id +0 +drop table if exists t; +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +Error 1467 (HY000): Failed to read auto-increment value from storage engine +set sql_mode=default; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +Level Code Message +Warning 1210 Incorrect arguments to sleep +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +a +0 +0 +0 +DROP TABLE t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +cast(t1.c1 as decimal(4, 1)) +999.9 +select cast(t1.c1 as decimal(5, 1)) from t1; +cast(t1.c1 as decimal(5, 1)) +1000.0 +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +cast(t1.c1 as decimal(5, 3)) +99.999 +select cast(t1.c1 as decimal(6, 3)) from t1; +cast(t1.c1 as decimal(6, 3)) +100.000 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +insert into t1 values(1, '1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +select id, a from t1; +id a +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +insert into t1 values(2, '-1e100'); +show warnings; +Level Code Message +Warning 1264 Out of range value for column 'a' at row 1 +select id, a from t1 order by id asc; +id a +1 2147483647 +2 -2147483648 +set sql_mode=default; +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +insert into tf values('-100'); +Error 1264 (22003): Out of range value for column 'a' at row 1 +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +a +0 +set @@sql_mode=default; diff --git a/tests/integrationtest/r/executor/inspection_common.result b/tests/integrationtest/r/executor/inspection_common.result new file mode 100644 index 0000000000000..cb36c9d1dd0fd --- /dev/null +++ b/tests/integrationtest/r/executor/inspection_common.result @@ -0,0 +1,12 @@ +select count(*) from information_schema.inspection_rules; +count(*) +15 +select count(*) from information_schema.inspection_rules where type='inspection'; +count(*) +5 +select count(*) from information_schema.inspection_rules where type='summary'; +count(*) +10 +select count(*) from information_schema.inspection_rules where type='inspection' and type='summary'; +count(*) +0 diff --git a/tests/integrationtest/r/executor/jointest/hash_join.result b/tests/integrationtest/r/executor/jointest/hash_join.result new file mode 100644 index 0000000000000..3f253e7607002 --- /dev/null +++ b/tests/integrationtest/r/executor/jointest/hash_join.result @@ -0,0 +1,308 @@ +drop table if exists t, s; +create table t(a int, index(a)); 
+create table s(a int, index(a)); +insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), (33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128); +insert into s values(1), (128); +set @@tidb_max_chunk_size=32; +set @@tidb_index_lookup_join_concurrency=1; +set @@tidb_index_join_batch_size=32; +desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a; +id estRows task access object operator info +IndexHashJoin 12487.50 root inner join, inner:IndexReader, outer key:executor__jointest__hash_join.t.a, inner key:executor__jointest__hash_join.s.a, equal cond:eq(executor__jointest__hash_join.t.a, executor__jointest__hash_join.s.a) +├─IndexReader(Build) 9990.00 root index:IndexFullScan +│ └─IndexFullScan 9990.00 cop[tikv] table:t, index:a(a) keep order:true, stats:pseudo +└─IndexReader(Probe) 12487.50 root index:Selection + └─Selection 12487.50 cop[tikv] not(isnull(executor__jointest__hash_join.s.a)) + └─IndexRangeScan 12500.00 cop[tikv] table:s, index:a(a) range: decided by [eq(executor__jointest__hash_join.s.a, executor__jointest__hash_join.t.a)], keep order:false, stats:pseudo +select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a; +a a +1 1 +128 128 +set @@tidb_max_chunk_size=default; +set @@tidb_index_lookup_join_concurrency=default; +set @@tidb_index_join_batch_size=default; +drop table if exists t1, t2; +create table t1 (id int); +create table t2 (id int, name varchar(255), ts timestamp); +insert into t1 values (1); +insert into t2 values (1, 'xxx', '2003-06-09 10:51:26'); +select ts from t1 inner join t2 where t2.name = 'xxx'; +ts +2003-06-09 10:51:26 +set tidb_cost_model_version=2; +set @@tidb_init_chunk_size=2; +DROP TABLE IF EXISTS t; +CREATE TABLE `t` (`a` int, pk integer auto_increment,`b` char (20),primary key (pk)); +CREATE INDEX idx_t_a ON t(`a`); +CREATE INDEX idx_t_b ON t(`b`); +INSERT INTO t VALUES (148307968, DEFAULT, 'nndsjofmpdxvhqv') , (-1327693824, DEFAULT, 'pnndsjofmpdxvhqvfny') , (-277544960, DEFAULT, 'fpnndsjo'); +DROP TABLE IF EXISTS s; +CREATE TABLE `s` (`a` int, `b` char (20)); +CREATE INDEX idx_s_a ON s(`a`); +INSERT INTO s VALUES (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (2, 'vtdiockfpn'), (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (6, 'ckfp'); +select /*+ INL_JOIN(t, s) */ t.a from t join s on t.a = s.a; +a +-277544960 +-277544960 +select /*+ INL_HASH_JOIN(t, s) */ t.a from t join s on t.a = s.a; +a +-277544960 +-277544960 +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t join s on t.a = s.a; +a +-277544960 +-277544960 +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a; +a +-1327693824 +-277544960 +-277544960 +148307968 +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a; +a +-1327693824 +-277544960 +-277544960 +148307968 +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = 
s.a; +a +-1327693824 +-277544960 +-277544960 +148307968 +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +a +-277544960 +-277544960 +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +a +-277544960 +-277544960 +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +a +-277544960 +-277544960 +select /*+ INL_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +a +NULL +NULL +NULL +NULL +-277544960 +-277544960 +select /*+ INL_HASH_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +a +NULL +NULL +NULL +NULL +-277544960 +-277544960 +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +a +NULL +NULL +NULL +NULL +-277544960 +-277544960 +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +a +148307968 +-277544960 +-277544960 +-1327693824 +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +a +148307968 +-277544960 +-277544960 +-1327693824 +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +a +148307968 +-277544960 +-277544960 +-1327693824 +DROP TABLE IF EXISTS t; +CREATE TABLE t(a BIGINT PRIMARY KEY, b BIGINT); +INSERT INTO t VALUES(1, 2); +SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +a b a b +1 2 1 2 +1 2 1 2 +SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +a b a b +1 2 1 2 +1 2 1 2 +SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +a b a b +1 2 1 2 +1 2 1 2 +drop table if exists t; +create table t(a decimal(6,2), index idx(a)); +insert into t values(1.01), (2.02), (NULL); +select /*+ INL_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +a +1.01 +2.02 +select /*+ INL_HASH_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +a +1.01 +2.02 +select /*+ INL_MERGE_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +a +1.01 +2.02 +drop table if exists t; +create table t(a bigint, b bigint, unique key idx1(a, b)); +insert into t values(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6); +set @@tidb_init_chunk_size = 2; +select /*+ INL_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4; +a b a b +1 1 NULL NULL +1 2 NULL NULL +1 3 NULL NULL +1 4 NULL NULL +1 5 1 1 +1 6 1 2 +select /*+ INL_HASH_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4; +a b a b +1 1 NULL NULL +1 2 NULL NULL +1 3 NULL NULL +1 4 NULL NULL +1 5 1 1 +1 6 1 2 +select /*+ INL_MERGE_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4; +a b a b +1 1 NULL NULL +1 2 NULL NULL +1 3 NULL NULL +1 4 NULL NULL +1 5 1 1 +1 6 1 2 +drop table if exists t1, t2, t3; +create table t1(a int primary key, b int); +insert into t1 values(1, 0), (2, null); +create table t2(a int primary key); +insert into t2 values(0); +select /*+ INL_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +a b a +1 0 0 +2 NULL NULL +select /*+ INL_HASH_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +a b a +1 0 0 +2 NULL NULL +select /*+ INL_MERGE_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +a b a +1 0 0 +2 NULL NULL +create table t3(a int, key(a)); +insert into t3 values(0); +select /*+ INL_JOIN(t3)*/ * 
from t1 left join t3 on t1.b = t3.a; +a b a +1 0 0 +2 NULL NULL +select /*+ INL_HASH_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a; +a b a +1 0 0 +2 NULL NULL +select /*+ INL_MERGE_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a; +a b a +1 0 0 +2 NULL NULL +drop table if exists t,s; +create table t(a int primary key auto_increment, b time); +create table s(a int, b time); +alter table s add index idx(a,b); +set @@tidb_index_join_batch_size=4; +set @@tidb_init_chunk_size=1; +set @@tidb_max_chunk_size=32; +set @@tidb_index_lookup_join_concurrency=15; +set @@session.tidb_executor_concurrency = 4; +set @@session.tidb_hash_join_concurrency = 5; +insert into t values(0, '01:01:01'); +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into s select a, b - 1 from t; +analyze table t; +analyze table s; +desc format = 'brief' select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +id estRows task access object operator info +HashAgg 1.00 root funcs:count(1)->Column#6 +└─IndexJoin 64.00 root inner join, inner:IndexReader, outer key:executor__jointest__hash_join.t.a, inner key:executor__jointest__hash_join.s.a, equal cond:eq(executor__jointest__hash_join.t.a, executor__jointest__hash_join.s.a), other cond:lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b) + ├─TableReader(Build) 64.00 root data:Selection + │ └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.t.b)) + │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false + └─IndexReader(Probe) 64.00 root index:Selection + └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.s.a)), not(isnull(executor__jointest__hash_join.s.b)) + └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(executor__jointest__hash_join.s.a, executor__jointest__hash_join.t.a) lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b)], keep order:false +select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 +set @@tidb_index_lookup_join_concurrency=1; +select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 +desc format = 'brief' select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +id estRows task access object operator info +HashAgg 1.00 root funcs:count(1)->Column#6 +└─IndexMergeJoin 64.00 root inner join, inner:IndexReader, outer key:executor__jointest__hash_join.t.a, inner key:executor__jointest__hash_join.s.a, other cond:lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b) + ├─TableReader(Build) 64.00 root data:Selection + │ └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.t.b)) + │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false + └─IndexReader(Probe) 64.00 root index:Selection + └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.s.a)), not(isnull(executor__jointest__hash_join.s.b)) + └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(executor__jointest__hash_join.s.a, executor__jointest__hash_join.t.a) lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b)], keep order:true +select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 
+set @@tidb_index_lookup_join_concurrency=1; +select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 +desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +id estRows task access object operator info +HashAgg 1.00 root funcs:count(1)->Column#6 +└─IndexHashJoin 64.00 root inner join, inner:IndexReader, outer key:executor__jointest__hash_join.t.a, inner key:executor__jointest__hash_join.s.a, equal cond:eq(executor__jointest__hash_join.t.a, executor__jointest__hash_join.s.a), other cond:lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b) + ├─TableReader(Build) 64.00 root data:Selection + │ └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.t.b)) + │ └─TableFullScan 64.00 cop[tikv] table:t keep order:false + └─IndexReader(Probe) 64.00 root index:Selection + └─Selection 64.00 cop[tikv] not(isnull(executor__jointest__hash_join.s.a)), not(isnull(executor__jointest__hash_join.s.b)) + └─IndexRangeScan 64.00 cop[tikv] table:s, index:idx(a, b) range: decided by [eq(executor__jointest__hash_join.s.a, executor__jointest__hash_join.t.a) lt(executor__jointest__hash_join.s.b, executor__jointest__hash_join.t.b)], keep order:false +select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 +set @@tidb_index_lookup_join_concurrency=1; +select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +count(*) +64 +drop table t1, t2; +create table t1(id int primary key); +create table t2(a int, b int); +insert into t1 values(1); +insert into t2 values(1,1),(2,1); +select /*+ inl_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; +id a b +1 1 1 +select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; +id a b +1 1 1 +select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; +id a b +1 1 1 +set tidb_cost_model_version=default; +set @@tidb_init_chunk_size=default; +set @@tidb_index_join_batch_size=default; +set @@tidb_init_chunk_size=default; +set @@tidb_max_chunk_size=default; +set @@tidb_index_lookup_join_concurrency=default; +set @@session.tidb_executor_concurrency = default; +set @@session.tidb_hash_join_concurrency = default; diff --git a/tests/integrationtest/r/executor/join.result b/tests/integrationtest/r/executor/jointest/join.result similarity index 93% rename from tests/integrationtest/r/executor/join.result rename to tests/integrationtest/r/executor/jointest/join.result index f2a24004f2ad2..ac36ba737e673 100644 --- a/tests/integrationtest/r/executor/join.result +++ b/tests/integrationtest/r/executor/jointest/join.result @@ -220,7 +220,7 @@ insert into t1 values(1, 100), (2, 100), (3, 100), (4, 100), (5, 100); insert into t2 select a*100, b*100 from t1; explain format = 'brief' select /*+ TIDB_SMJ(t2) */ * from t1 left outer join t2 on t1.a=t2.a and t1.a!=3 order by t1.a; id estRows task access object operator info -MergeJoin 10000.00 root left outer join, left key:executor__join.t1.a, right key:executor__join.t2.a, left cond:[ne(executor__join.t1.a, 3)] +MergeJoin 10000.00 root left outer join, left key:executor__jointest__join.t1.a, right key:executor__jointest__join.t2.a, left cond:[ne(executor__jointest__join.t1.a, 3)] ├─TableReader(Build) 6666.67 root data:TableRangeScan │ └─TableRangeScan 6666.67 cop[tikv] table:t2 range:[-inf,3), (3,+inf], keep order:true, stats:pseudo └─TableReader(Probe) 10000.00 
root data:TableFullScan @@ -639,7 +639,7 @@ create table events (clock int, source int); SELECT * FROM events e JOIN (SELECT MAX(clock) AS clock FROM events e2 GROUP BY e2.source) e3 ON e3.clock=e.clock; clock source clock SELECT * FROM events e JOIN (SELECT clock FROM events e2 GROUP BY e2.source) e3 ON e3.clock=e.clock; -Error 1055 (42000): Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'executor__join.e2.clock' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by +Error 1055 (42000): Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'executor__jointest__join.e2.clock' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by drop table if exists tpj1,tpj2; create table tpj1 (id int, b int, unique index (id)); create table tpj2 (id int, b int, unique index (id)); @@ -719,12 +719,12 @@ insert t1 values (1,2), (10,20), (0,0); insert t2 values (1,3), (100,200), (0,0); explain format = 'brief' select * from t1 natural join t2; id estRows task access object operator info -HashJoin 12487.50 root inner join, equal:[eq(executor__join.t1.a, executor__join.t2.a)] +HashJoin 12487.50 root inner join, equal:[eq(executor__jointest__join.t1.a, executor__jointest__join.t2.a)] ├─TableReader(Build) 9990.00 root data:Selection -│ └─Selection 9990.00 cop[tikv] not(isnull(executor__join.t2.a)) +│ └─Selection 9990.00 cop[tikv] not(isnull(executor__jointest__join.t2.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 9990.00 root data:Selection - └─Selection 9990.00 cop[tikv] not(isnull(executor__join.t1.a)) + └─Selection 9990.00 cop[tikv] not(isnull(executor__jointest__join.t1.a)) └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo select * from t1 natural join t2; a b c @@ -732,10 +732,10 @@ a b c 1 2 3 explain format = 'brief' select * from t1 natural left join t2 order by a; id estRows task access object operator info -Sort 12487.50 root executor__join.t1.a -└─HashJoin 12487.50 root left outer join, equal:[eq(executor__join.t1.a, executor__join.t2.a)] +Sort 12487.50 root executor__jointest__join.t1.a +└─HashJoin 12487.50 root left outer join, equal:[eq(executor__jointest__join.t1.a, executor__jointest__join.t2.a)] ├─TableReader(Build) 9990.00 root data:Selection - │ └─Selection 9990.00 cop[tikv] not(isnull(executor__join.t2.a)) + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__jointest__join.t2.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo @@ -746,11 +746,11 @@ a b c 10 20 NULL explain format = 'brief' select * from t1 natural right join t2 order by a; id estRows task access object operator info -Sort 12487.50 root executor__join.t2.a -└─Projection 12487.50 root executor__join.t2.a, executor__join.t2.c, executor__join.t1.b - └─HashJoin 12487.50 root right outer join, equal:[eq(executor__join.t1.a, executor__join.t2.a)] +Sort 12487.50 root executor__jointest__join.t2.a +└─Projection 12487.50 root executor__jointest__join.t2.a, executor__jointest__join.t2.c, executor__jointest__join.t1.b + └─HashJoin 12487.50 root right outer join, equal:[eq(executor__jointest__join.t1.a, executor__jointest__join.t2.a)] ├─TableReader(Build) 9990.00 root data:Selection - │ 
└─Selection 9990.00 cop[tikv] not(isnull(executor__join.t1.a)) + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__jointest__join.t1.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo @@ -761,11 +761,11 @@ a c b 100 200 NULL explain format = 'brief' SELECT * FROM t1 NATURAL LEFT JOIN t2 WHERE not(t1.a <=> t2.a); id estRows task access object operator info -Projection 9990.00 root executor__join.t1.a, executor__join.t1.b, executor__join.t2.c -└─Selection 9990.00 root not(nulleq(executor__join.t1.a, executor__join.t2.a)) - └─HashJoin 12487.50 root left outer join, equal:[eq(executor__join.t1.a, executor__join.t2.a)] +Projection 9990.00 root executor__jointest__join.t1.a, executor__jointest__join.t1.b, executor__jointest__join.t2.c +└─Selection 9990.00 root not(nulleq(executor__jointest__join.t1.a, executor__jointest__join.t2.a)) + └─HashJoin 12487.50 root left outer join, equal:[eq(executor__jointest__join.t1.a, executor__jointest__join.t2.a)] ├─TableReader(Build) 9990.00 root data:Selection - │ └─Selection 9990.00 cop[tikv] not(isnull(executor__join.t2.a)) + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__jointest__join.t2.a)) │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo └─TableReader(Probe) 10000.00 root data:TableFullScan └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo @@ -853,3 +853,28 @@ select * from (t1 natural join t2) right outer join (t3 natural join t4) using ( c b y a 3 1 11 2 3 1 2 2 +drop table if exists t, t1, t2, t3; +create table t(a int, b enum('A', 'B')); +create table t1(a1 int, b1 enum('B', 'A') NOT NULL, UNIQUE KEY (b1)); +insert into t values (1, 'A'); +insert into t1 values (1, 'A'); +select /*+ INL_HASH_JOIN(t1) */ * from t join t1 on t.b = t1.b1; +a b a1 b1 +1 A 1 A +select /*+ INL_JOIN(t1) */ * from t join t1 on t.b = t1.b1; +a b a1 b1 +1 A 1 A +create table t2(a1 int, b1 enum('C', 'D') NOT NULL, UNIQUE KEY (b1)); +insert into t2 values (1, 'C'); +select /*+ INL_HASH_JOIN(t2) */ * from t join t2 on t.b = t2.b1; +a b a1 b1 +select /*+ INL_JOIN(t2) */ * from t join t2 on t.b = t2.b1; +a b a1 b1 +create table t3(a1 int, b1 enum('A', 'B') NOT NULL, UNIQUE KEY (b1)); +insert into t3 values (1, 'A'); +select /*+ INL_HASH_JOIN(t3) */ * from t join t3 on t.b = t3.b1; +a b a1 b1 +1 A 1 A +select /*+ INL_JOIN(t3) */ * from t join t3 on t.b = t3.b1; +a b a1 b1 +1 A 1 A diff --git a/tests/integrationtest/r/executor/merge_join.result b/tests/integrationtest/r/executor/merge_join.result new file mode 100644 index 0000000000000..bdd50b38aef8d --- /dev/null +++ b/tests/integrationtest/r/executor/merge_join.result @@ -0,0 +1,697 @@ +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] 
not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +1 1 NULL NULL +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 10000.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +NULL NULL 1 1 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 9990.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─MergeJoin 12487.50 root right outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 9990.00 root executor__merge_join.t.c1 + │ └─TableReader 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.t.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t1.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +id estRows task access object operator info +MergeJoin 12.50 root inner join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 +├─Sort(Build) 10.00 root executor__merge_join.t1.c1 +│ └─TableReader 10.00 root data:Selection +│ └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 3), 0) +│ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +└─Sort(Probe) 10.00 root executor__merge_join.t.c1 + └─TableReader 10.00 root data:Selection + └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t.c1)), or(eq(executor__merge_join.t.c1, 3), 0) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +id 
estRows task access object operator info +Sort 10000.00 root executor__merge_join.t1.c1 +└─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1, left cond:[ne(executor__merge_join.t.c1, 1)] + ├─Sort(Build) 6656.67 root executor__merge_join.t1.c1 + │ └─TableReader 6656.67 root data:Selection + │ └─Selection 6656.67 cop[tikv] ne(executor__merge_join.t1.c1, 1), not(isnull(executor__merge_join.t1.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +c1 c2 c1 c2 +1 1 NULL NULL +2 2 2 3 +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +c1 c2 c1 c2 c1 c2 +NULL NULL NULL NULL 5 5 +NULL NULL NULL NULL 9 9 +1 1 1 1 1 1 +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1; +c1 c1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +c1 +1 +2 +3 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +c1 +1 +2 +3 +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +c1 +1 +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +a b +NULL 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +count(*) +4 +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +id estRows task access object operator info +Sort 100000000.00 root executor__merge_join.t.a, executor__merge_join.t.a +└─MergeJoin 100000000.00 
root inner join + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +a a +1 1 +1 2 +2 1 +2 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +id estRows task access object operator info +MergeJoin 10000.00 root left outer semi join, other cond:eq(executor__merge_join.t.a, executor__merge_join.s.a), ge(executor__merge_join.s.b, executor__merge_join.t.b) +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +a in (select a from s where s.b >= t.b) +1 +0 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +a +3 +2 +1 +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +a +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +id estRows task access object operator info +Sort 12487.50 root executor__merge_join.s.a:desc +└─HashJoin 12487.50 root inner join, equal:[eq(executor__merge_join.s.a, executor__merge_join.s.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +a1 +4 +3 +2 +1 +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root left outer join, left 
key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +1 1 NULL NULL +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 8000.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t.c1 + ├─Sort(Build) 3336.66 root executor__merge_join.t1.c1 + │ └─TableReader 3336.66 root data:Selection + │ └─Selection 3336.66 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 1), gt(executor__merge_join.t1.c2, 20)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +NULL NULL 1 1 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +id estRows task access object operator info +Selection 9990.00 root or(eq(executor__merge_join.t.c1, 1), gt(executor__merge_join.t1.c2, 20)) +└─Shuffle 12487.50 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 12487.50 root right outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 9990.00 root executor__merge_join.t.c1 + │ └─TableReader 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.t.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t1.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +id estRows task access object operator info +Shuffle 12.50 root execution info: concurrency:4, data sources:[TableReader TableReader] +└─MergeJoin 12.50 root inner join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1 + ├─Sort(Build) 10.00 root executor__merge_join.t1.c1 + │ └─TableReader 10.00 root data:Selection + │ └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t1.c1)), or(eq(executor__merge_join.t1.c1, 3), 0) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10.00 root 
executor__merge_join.t.c1 + └─TableReader 10.00 root data:Selection + └─Selection 10.00 cop[tikv] not(isnull(executor__merge_join.t.c1)), or(eq(executor__merge_join.t.c1, 3), 0) + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +c1 c2 c1 c2 +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +id estRows task access object operator info +Sort 10000.00 root executor__merge_join.t1.c1 +└─Shuffle 10000.00 root execution info: concurrency:4, data sources:[TableReader TableReader] + └─MergeJoin 10000.00 root left outer join, left key:executor__merge_join.t.c1, right key:executor__merge_join.t1.c1, left cond:[ne(executor__merge_join.t.c1, 1)] + ├─Sort(Build) 6656.67 root executor__merge_join.t1.c1 + │ └─TableReader 6656.67 root data:Selection + │ └─Selection 6656.67 cop[tikv] ne(executor__merge_join.t1.c1, 1), not(isnull(executor__merge_join.t1.c1)) + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo + └─Sort(Probe) 10000.00 root executor__merge_join.t.c1 + └─TableReader 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +c1 c2 c1 c2 +1 1 NULL NULL +2 2 2 3 +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +c1 c2 c1 c2 c1 c2 +NULL NULL NULL NULL 5 5 +NULL NULL NULL NULL 9 9 +1 1 1 1 1 1 +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1; +c1 c1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +1 1 +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +c1 +1 +2 +3 +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +c1 +1 +2 +3 +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +c1 +1 +2 +3 +4 +5 +6 +7 +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +c1 +1 +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +a b +NULL 2 +drop table if exists t; 
+drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +count(*) +4 +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +id estRows task access object operator info +Sort 100000000.00 root executor__merge_join.t.a, executor__merge_join.t.a +└─MergeJoin 100000000.00 root inner join + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +a a +1 1 +1 2 +2 1 +2 2 +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +id estRows task access object operator info +MergeJoin 10000.00 root left outer semi join, other cond:eq(executor__merge_join.t.a, executor__merge_join.s.a), ge(executor__merge_join.s.b, executor__merge_join.t.b) +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +└─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +a in (select a from s where s.b >= t.b) +1 +0 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +a +3 +2 +1 +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +a +3 +3 +3 +3 +3 +3 +2 +2 +2 +2 +2 +2 +1 +1 +1 +1 +1 +1 +1 +1 +1 +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +id estRows task access object operator info +Sort 12487.50 root executor__merge_join.s.a:desc +└─HashJoin 12487.50 root inner join, equal:[eq(executor__merge_join.s.a, executor__merge_join.s.a)] + ├─TableReader(Build) 9990.00 root data:Selection + │ └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + │ └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo + └─TableReader(Probe) 9990.00 root data:Selection + └─Selection 9990.00 cop[tikv] not(isnull(executor__merge_join.s.a)) + └─TableFullScan 10000.00 cop[tikv] table:s keep order:false, stats:pseudo +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +a1 +4 +3 +2 +1 +set @@session.tidb_merge_join_concurrency = default; +drop table if 
exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); +create table t2(c1 int, c2 int, PRIMARY KEY (c1)); +create table t3(c1 int, c2 int, PRIMARY KEY (c1)); +insert into t1 values(1,1),(2,2),(3,3); +insert into t2 values(2,3),(3,4),(4,5); +insert into t3 values(1,2),(2,4),(3,10); +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +id estRows task access object operator info +Sort 15625.00 root executor__merge_join.t1.c1 +└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo + └─MergeJoin(Probe) 12500.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +id estRows task access object operator info +MergeJoin 15625.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t3.c1 +├─TableReader(Build) 10000.00 root data:TableFullScan +│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo +└─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1 + ├─TableReader(Build) 10000.00 root data:TableFullScan + │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo + └─TableReader(Probe) 10000.00 root data:TableFullScan + └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1; +c1 c2 c1 c2 c1 c2 +2 2 2 3 2 4 +3 3 3 4 3 10 +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1(c1 int, c2 int, PRIMARY KEY (c1)); 
+create table t2(c1 int, c2 int, PRIMARY KEY (c1));
+create table t3(c1 int, c2 int, PRIMARY KEY (c1));
+insert into t1 values(1,1),(2,2),(3,3);
+insert into t2 values(2,3),(3,4),(4,5);
+insert into t3 values(1,2),(2,4),(3,10);
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+id estRows task access object operator info
+Sort 15625.00 root executor__merge_join.t1.c1
+└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo
+  └─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1
+    ├─TableReader(Build) 10000.00 root data:TableFullScan
+    │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo
+    └─TableReader(Probe) 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+c1 c2 c1 c2 c1 c2
+2 2 2 3 2 4
+3 3 3 4 3 10
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+id estRows task access object operator info
+Sort 15625.00 root executor__merge_join.t1.c1
+└─MergeJoin 15625.00 root inner join, left key:executor__merge_join.t2.c1, right key:executor__merge_join.t3.c1
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo
+  └─MergeJoin(Probe) 12500.00 root right outer join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1
+    ├─TableReader(Build) 10000.00 root data:TableFullScan
+    │ └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo
+    └─TableReader(Probe) 10000.00 root data:TableFullScan
+      └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+c1 c2 c1 c2 c1 c2
+2 2 2 3 2 4
+3 3 3 4 3 10
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+id estRows task access object operator info
+MergeJoin 15625.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t3.c1
+├─TableReader(Build) 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t3 keep order:true, stats:pseudo
+└─MergeJoin(Probe) 12500.00 root inner join, left key:executor__merge_join.t1.c1, right key:executor__merge_join.t2.c1
+  ├─TableReader(Build) 10000.00 root data:TableFullScan
+  │ └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:true, stats:pseudo
+  └─TableReader(Probe) 10000.00 root data:TableFullScan
+    └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:true, stats:pseudo
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+c1 c2 c1 c2 c1 c2
+2 2 2 3 2 4
+3 3 3 4 3 10
+set @@session.tidb_merge_join_concurrency = default;
+set @@session.tidb_executor_concurrency = 4;
+set @@session.tidb_hash_join_concurrency = 5;
+set @@session.tidb_distsql_scan_concurrency = 15;
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a bigint, b bit(1), index idx_a(a));
+create table t2(a bit(1) not null, b bit(1), index idx_a(a));
+insert into t1 values(1, 1);
+insert into t2 values(1, 1);
+select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a;
+hex(t1.a) hex(t2.a)
+1 1
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a float, b double, index idx_a(a));
+create table t2(a double not null, b double, index idx_a(a));
+insert into t1 values(1, 1);
+insert into t2 values(1, 1);
+select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a;
+a a
+1 1
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a bigint signed, b bigint, index idx_a(a));
+create table t2(a bigint unsigned, b bigint, index idx_a(a));
+insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);
+insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);
+select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a;
+a a
+0 0
+0 0
+0 0
+0 0
+set @@session.tidb_executor_concurrency = default;
+set @@session.tidb_hash_join_concurrency = default;
+set @@session.tidb_distsql_scan_concurrency = default;
+drop table if exists R;
+drop table if exists Y;
+create table Y (a int primary key, b int, index id_b(b));
+insert into Y values (0,2),(2,2);
+create table R (a int primary key, b int);
+insert into R values (2,2);
+select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;
+max(Y.a)
+2
+set @@session.tidb_merge_join_concurrency = 4;
+drop table if exists R;
+drop table if exists Y;
+create table Y (a int primary key, b int, index id_b(b));
+insert into Y values (0,2),(2,2);
+create table R (a int primary key, b int);
+insert into R values (2,2);
+select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;
+max(Y.a)
+2
+set @@session.tidb_merge_join_concurrency = default;
diff --git a/tests/integrationtest/r/executor/parallel_apply.result b/tests/integrationtest/r/executor/parallel_apply.result
new file mode 100644
index 0000000000000..a7a53dd84d730
--- /dev/null
+++ b/tests/integrationtest/r/executor/parallel_apply.result
@@ -0,0 +1,172 @@
+set tidb_enable_parallel_apply=0;
+select @@tidb_enable_parallel_apply;
+@@tidb_enable_parallel_apply
+0
+set tidb_enable_parallel_apply=1;
+select @@tidb_enable_parallel_apply;
+@@tidb_enable_parallel_apply
+1
+set tidb_enable_parallel_apply=on;
+select @@tidb_enable_parallel_apply;
+@@tidb_enable_parallel_apply
+1
+set tidb_enable_parallel_apply=off;
+select @@tidb_enable_parallel_apply;
+@@tidb_enable_parallel_apply
+0
+set tidb_enable_parallel_apply=-1;
+Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '-1'
+set tidb_enable_parallel_apply=2;
+Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '2'
+set tidb_enable_parallel_apply=1000;
+Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of '1000'
+set tidb_enable_parallel_apply='onnn';
+Error 1231 (42000): Variable 'tidb_enable_parallel_apply' can't be set to the value of 'onnn'
+set tidb_enable_parallel_apply=default;
+set tidb_enable_parallel_apply=true;
+drop table if exists t, t1;
+create table t(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int);
+create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int);
+insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4);
+insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4);
+select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 where t1.a >= t.a) from t;
+(select min(t1.b) from t1 where t1.a >= t.a) (select sum(t1.b) from t1 where t1.a >= t.a)
+1 10
+1 10
+1 10
+1 10
+select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t;
+(select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b)
+1 10
+2 9
+3 7
+4 4
+set tidb_enable_prepared_plan_cache=1;
+drop table if exists t1, t2;
+create table t1(a int, b int);
+create table t2(a int, b int);
+insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3);
+insert into t2 values (0, 1), (2, -1), (3, 2);
+prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)";
+set @a=1;
+execute stmt using @a;
+a b
+1 1
+1 5
+2 3
+2 4
+set @a=2;
+execute stmt using @a;
+a b
+1 5
+2 3
+2 4
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_clustered_index=ON;
+drop table if exists t, t2;
+create table t(a int, b int, c int, primary key(a, b));
+create table t2(a int, b int, c int, primary key(a, c));
+insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4);
+insert into t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4);
+select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0;
+a b c
+1 1 1
+2 2 2
+3 3 3
+set tidb_enable_clustered_index=INT_ONLY;
+drop table if exists t1, t2;
+create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40));
+create table t2(a int, b int) partition by hash(a) partitions 4;
+insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35);
+insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35);
+select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10;
+(select count(*) from t2 where t2.a > t1.b and t2.a=20) (select max(t2.b) from t2 where t2.a between t1.a and 20)
+0 NULL
+0 NULL
+0 15
+set tidb_enable_parallel_apply=default;
+set tidb_enable_prepared_plan_cache=default;
+set tidb_enable_clustered_index=default;
+set tidb_enable_parallel_apply=true;
+drop table if exists t, t2;
+create table t(a bigint, b int);
+create table t2(a int, b int);
+insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4);
+insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4);
+delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1;
+select * from t;
+a b
+1 1
+1 1
+drop table if exists t;
+create table t(a int, b int, c int);
+insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 1), (2, 2, 2), (3, 3, 3);
+insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2);
+select * from t;
+a b c
+1 1 1
+1 1 1
+1 1 1
+1 1 1
+2 2 2
+2 2 2
+3 3 3
+3 3 3
+drop table if exists t, t2;
+create table t(a smallint, b int);
+create table t2(a int, b int);
+insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3);
+insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3);
+update t set a = a + 1 where (select count(*) from t2 where t2.a <= t.a) in (1, 2);
+select * from t;
+a b
+2 1
+2 1
+2 2
+2 2
+3 3
+3 3
+drop table if exists t, t2;
+create table t(a tinyint, b int, unique index idx(a));
+create table t2(a tinyint, b int);
+insert into t values (1, 1), (2, 2), (3, 3), (4, 4);
+insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3);
+replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5);
+select * from t;
+a b
+1 1
+2 2
+3 3
+4 2
+9 3
+drop table if exists t1, t2;
+create table t1(a int, b int);
+create table t2(a int, b int);
+insert into t1 values (1, 2), (1, 3);
+begin;
+insert into t1 values (1, 4), (2, 3), (2, 5);
+insert into t2 values (2, 3), (3, 4);
+select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b);
+a b
+1 4
+2 5
+delete from t1 where a = 1;
+select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b);
+a b
+2 5
+commit;
+select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b);
+a b
+2 5
+set tidb_enable_parallel_apply=default;
+set tidb_enable_parallel_apply=true;
+drop table if exists t1, t2;
+create table t1(a int);
+create table t2(a int);
+select case when t1.a is null
+then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a
+from t1 where t1.a=1 order by a limit 1;
+a
+set tidb_enable_parallel_apply=default;
diff --git a/tests/integrationtest/r/executor/partition/issues.result b/tests/integrationtest/r/executor/partition/issues.result
new file mode 100644
index 0000000000000..36acea78dcc46
--- /dev/null
+++ b/tests/integrationtest/r/executor/partition/issues.result
@@ -0,0 +1,461 @@
+drop table if exists t, t0, t1, t2;
+set @@tidb_partition_prune_mode = 'dynamic';
+set @@session.tidb_enable_list_partition = ON;
+CREATE TABLE t (
+col1 tinyint(4) primary key
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 )
+PARTITIONS 6;
+insert into t values(-128), (107);
+prepare stmt from 'select col1 from t where col1 in (?, ?, ?)';
+set @a=-128, @b=107, @c=-128;
+execute stmt using @a,@b,@c;
+col1
+-128
+107
+CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2;
+insert into t0 values (1);
+select a from t0 where a in (1);
+a
+1
+create table t1 (a int primary key) partition by range (a+5) (
+partition p0 values less than(10), partition p1 values less than(20));
+insert into t1 values (5);
+select a from t1 where a in (5);
+a
+5
+create table t2 (a int primary key) partition by list (a+5) (
+partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12));
+insert into t2 values (5);
+select a from t2 where a in (5);
+a
+5
+set @@tidb_partition_prune_mode = default;
+set @@session.tidb_enable_list_partition = default;
+drop table if exists UK_HP16726;
+CREATE TABLE UK_HP16726 (
+COL1 bigint(16) DEFAULT NULL,
+COL2 varchar(20) DEFAULT NULL,
+COL4 datetime DEFAULT NULL,
+COL3 bigint(20) DEFAULT NULL,
+COL5 float DEFAULT NULL,
+UNIQUE KEY UK_COL1 (COL1) /*!80000 INVISIBLE */
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
+PARTITION BY HASH( COL1 )
+PARTITIONS 25;
+select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807;
+col1 col1
+explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807;
+id estRows task access object operator info
+HashAgg 71666.67 root group by:executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1
+└─HashJoin 111979.17 root inner join, equal:[eq(executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1)]
+  ├─PartitionUnion(Build) 89583.33 root
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p0 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p1 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p2 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p3 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p4 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p5 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p6 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p7 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p8 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p9 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p10 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p11 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p12 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p13 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p14 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p15 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p16 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p17 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p18 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p19 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p20 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p21 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p22 keep order:false, stats:pseudo
+  │ ├─TableReader 3583.33 root data:Selection
+  │ │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │ │   └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p23 keep order:false, stats:pseudo
+  │ └─TableReader 3583.33 root data:Selection
+  │   └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │     └─TableFullScan 10000.00 cop[tikv] table:t2, partition:p24 keep order:false, stats:pseudo
+  └─PartitionUnion(Probe) 89583.33 root
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p0 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p1 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p2 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p3 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p4 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p5 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p6 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p7 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p8 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p9 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p10 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p11 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p12 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p13 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p14 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p15 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p16 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p17 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p18 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p19 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p20 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p21 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p22 keep order:false, stats:pseudo
+    ├─TableReader 3583.33 root data:Selection
+    │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+    │   └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p23 keep order:false, stats:pseudo
+    └─TableReader 3583.33 root data:Selection
+      └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+        └─TableFullScan 10000.00 cop[tikv] table:t1, partition:p24 keep order:false, stats:pseudo
+set @@tidb_partition_prune_mode = 'dynamic';
+analyze table UK_HP16726;
+select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807;
+col1 col1
+explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807;
+id estRows task access object operator info
+HashAgg 2866.67 root group by:executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1, funcs:firstrow(executor__partition__issues.uk_hp16726.col1)->executor__partition__issues.uk_hp16726.col1
+└─HashJoin 4479.17 root inner join, equal:[eq(executor__partition__issues.uk_hp16726.col1, executor__partition__issues.uk_hp16726.col1)]
+  ├─TableReader(Build) 3583.33 root partition:all data:Selection
+  │ └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+  │   └─TableFullScan 10000.00 cop[tikv] table:t2 keep order:false, stats:pseudo
+  └─TableReader(Probe) 3583.33 root partition:all data:Selection
+    └─Selection 3583.33 cop[tikv] gt(executor__partition__issues.uk_hp16726.col1, -9223372036854775808), ne(executor__partition__issues.uk_hp16726.col1, 9223372036854775807), not(isnull(executor__partition__issues.uk_hp16726.col1))
+      └─TableFullScan 10000.00 cop[tikv] table:t1 keep order:false, stats:pseudo
+set @@tidb_partition_prune_mode = default;
+drop table if exists IDT_HP23902, t;
+CREATE TABLE IDT_HP23902 (
+COL1 smallint DEFAULT NULL,
+COL2 varchar(20) DEFAULT NULL,
+COL4 datetime DEFAULT NULL,
+COL3 bigint DEFAULT NULL,
+COL5 float DEFAULT NULL,
+KEY UK_COL1 (COL1)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
+PARTITION BY HASH( COL1+30 )
+PARTITIONS 6;
+insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137);
+show warnings;
+Level Code Message
+Warning 1748 Found a row not matching the given partition set
+Warning 1748 Found a row not matching the given partition set
+select * from IDT_HP23902;
+COL1 COL2 COL4 COL3 COL5
+create table t (
+a int
+) partition by range(a) (
+partition p0 values less than (10),
+partition p1 values less than (20));
+insert ignore into t partition(p0)(a) values(12);
+show warnings;
+Level Code Message
+Warning 1748 Found a row not matching the given partition set
+select * from t;
+a
+drop table if exists tbl_936;
+set @@tidb_partition_prune_mode = 'dynamic';
+CREATE TABLE tbl_936 (
+col_5410 smallint NOT NULL,
+col_5411 double,
+col_5412 boolean NOT NULL DEFAULT 1,
+col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie',
+col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj',
+col_5415 timestamp NOT NULL DEFAULT '2021-07-06',
+col_5416 decimal(6, 6) DEFAULT 0.49,
+col_5417 text COLLATE utf8_bin,
+col_5418 float DEFAULT 2048.0762299371554,
+col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370,
+PRIMARY KEY (col_5419) )
+PARTITION BY HASH (col_5419) PARTITIONS 3;
+SELECT last_value(col_5414) OVER w FROM tbl_936
+WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419)
+ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w;
+last_value(col_5414) OVER w
+set @@tidb_partition_prune_mode = default;
+drop table if exists t;
+CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b))
+PARTITION BY RANGE ( TO_DAYS(b) ) (
+PARTITION p0 VALUES LESS THAN (737821),
+PARTITION p1 VALUES LESS THAN (738289)
+);
+INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0);
+select c from t use index(primary) where a=0 limit 1;
+c
+0
+CREATE TABLE test_partition (
+a varchar(100) NOT NULL,
+b date NOT NULL,
+c varchar(100) NOT NULL,
+d datetime DEFAULT NULL,
+e datetime DEFAULT NULL,
+f bigint(20) DEFAULT NULL,
+g bigint(20) DEFAULT NULL,
+h bigint(20) DEFAULT NULL,
+i bigint(20) DEFAULT NULL,
+j bigint(20) DEFAULT NULL,
+k bigint(20) DEFAULT NULL,
+l bigint(20) DEFAULT NULL,
+PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin
+PARTITION BY RANGE ( TO_DAYS(b) ) (
+PARTITION pmin VALUES LESS THAN (737821),
+PARTITION p20200601 VALUES LESS THAN (738289));
+INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', '428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0);
+select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200;
+c j l
+428ff6a1-bb37-42ac-9883-33d7a29961e6 9 0
+drop table if exists tbl_500, tbl_600;
+set @@tidb_partition_prune_mode = 'dynamic';
+CREATE TABLE tbl_500 (
+col_20 tinyint(4) NOT NULL,
+col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL,
+col_22 json DEFAULT NULL,
+col_23 blob DEFAULT NULL,
+col_24 mediumint(9) NOT NULL,
+col_25 float NOT NULL DEFAULT '7306.384497585912',
+col_26 binary(196) NOT NULL,
+col_27 timestamp DEFAULT '1976-12-08 00:00:00',
+col_28 bigint(20) NOT NULL,
+col_29 tinyint(1) NOT NULL DEFAULT '1',
+PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */,
+KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24),
+KEY idx_8 (col_25,col_29,col_24)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
+CREATE TABLE tbl_600 (
+col_60 int(11) NOT NULL DEFAULT '-776833487',
+col_61 tinyint(1) NOT NULL DEFAULT '1',
+col_62 tinyint(4) NOT NULL DEFAULT '-125',
+PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */,
+KEY idx_19 (col_60)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci
+PARTITION BY HASH( col_60 )
+PARTITIONS 1;
+insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0;
+select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' );
+col_20 col_21 col_22 col_23 col_24 col_25 col_26 col_27 col_28 col_29
+set @@tidb_partition_prune_mode = default;
+drop table if exists t1, t2;
+set @@tidb_partition_prune_mode='static-only';
+create table t1 (c_datetime datetime, primary key (c_datetime))
+partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')),
+partition p1 values less than (to_days('2020-04-01')),
+partition p2 values less than (to_days('2020-06-01')),
+partition p3 values less than maxvalue);
+create table t2 (c_datetime datetime, unique key(c_datetime));
+insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58');
+insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18');
+begin;
+select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update;
+c_datetime c_datetime
+2020-02-21 07:15:33 2020-01-10 09:36:00
+2020-02-21 07:15:33 2020-02-04 06:00:00
+2020-04-27 13:50:58 2020-01-10 09:36:00
+2020-04-27 13:50:58 2020-02-04 06:00:00
+2020-06-26 03:24:00 2020-01-10 09:36:00
+2020-06-26 03:24:00 2020-02-04 06:00:00
+2020-06-26 03:24:00 2020-06-12 03:45:18
+rollback;
+set @@tidb_partition_prune_mode = default;
+drop table if exists p, t;
+set @@tidb_enable_list_partition = OFF;
+create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2));
+set @@tidb_enable_list_partition = default;
+drop table if exists issue25528;
+set @@tidb_partition_prune_mode = 'static';
+create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8;
+insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20');
+begin pessimistic;
+select * from issue25528 where id = 1 for update;
+id balance balance2 created_at
+1 100.00 -100.00 2021-06-17 22:35:20
+drop table if exists issue25528;
+CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( `c1` ) PARTITIONS 10;
+INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4);
+select * from issue25528 where c1 in (3, 4) order by c2 for update;
+c1 c2 c3 c4
+3 3 3 3
+4 4 4 4
+rollback;
+set @@tidb_enable_list_partition = default;
+set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic';
+DROP TABLE IF EXISTS `tbl_18`;
+CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3;
+INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725);
+select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update;
+col_120 col_122 col_123
+1588592628 3937.1887880628115 1329207475772244999
+1674237178 3937.1887880628115 -6459065549188938772
+2637316689 3937.1887880628115 -4522626077860026631
+672436701 3937.1887880628115 -7373106839136381229
+831809724 3937.1887880628115 -4426441253940231780
+select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update;
+col_120 col_122 col_123
+1588592628 3937.1887880628115 1329207475772244999
+1674237178 3937.1887880628115 -6459065549188938772
+2637316689 3937.1887880628115 -4522626077860026631
+672436701 3937.1887880628115 -7373106839136381229
+831809724 3937.1887880628115 -4426441253940231780
+set @@tidb_enable_index_merge=default,@@tidb_partition_prune_mode=default;
+drop table if exists t;
+CREATE TABLE `t` (`a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000));
+set @@tidb_partition_prune_mode = 'static';
+insert into t select * from t where a=3000;
+set @@tidb_partition_prune_mode = 'dynamic';
+insert into t select * from t where a=3000;
+set @@tidb_partition_prune_mode = default;
+set @@tidb_opt_advanced_join_hint=0;
+drop table if exists c, t;
+CREATE TABLE `c` (`serial_id` varchar(24),`occur_trade_date` date,`txt_account_id` varchar(24),`capital_sub_class` varchar(10),`occur_amount` decimal(16,2),`broker` varchar(10),PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */,KEY `idx_serial_id` (`serial_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci PARTITION BY RANGE COLUMNS(`serial_id`) (PARTITION `p202209` VALUES LESS THAN ('20221001'),PARTITION `p202210` VALUES LESS THAN ('20221101'),PARTITION `p202211` VALUES LESS THAN ('20221201'));
+CREATE TABLE `t` ( `txn_account_id` varchar(24), `account_id` varchar(32), `broker` varchar(10), PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci;
+INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES ('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009');
+INSERT INTO `t` VALUES ('04482786','1142927','0009');
+set tidb_partition_prune_mode='dynamic';
+analyze table c;
+analyze table t;
+explain select
+/*+ inl_join(c) */
+c.occur_amount
+from
+c
+join t on c.txt_account_id = t.txn_account_id
+and t.broker = '0009'
+and c.occur_trade_date = '2022-11-17';
+id estRows task access object operator info
+IndexJoin_22 1.00 root inner join, inner:TableReader_21, outer key:executor__partition__issues.t.txn_account_id, inner key:executor__partition__issues.c.txt_account_id, equal cond:eq(executor__partition__issues.t.txn_account_id, executor__partition__issues.c.txt_account_id)
+├─TableReader_27(Build) 1.00 root data:Selection_26
+│ └─Selection_26 1.00 cop[tikv] eq(executor__partition__issues.t.broker, "0009")
+│   └─TableFullScan_25 1.00 cop[tikv] table:t keep order:false
+└─TableReader_21(Probe) 1.00 root partition:all data:Selection_20
+  └─Selection_20 1.00 cop[tikv] eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)
+    └─TableRangeScan_19 1.00 cop[tikv] table:c range: decided by [eq(executor__partition__issues.c.txt_account_id, executor__partition__issues.t.txn_account_id) eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false
+select
+/*+ inl_join(c) */
+c.occur_amount
+from
+c
+join t on c.txt_account_id = t.txn_account_id
+and t.broker = '0009'
+and c.occur_trade_date = '2022-11-17';
+occur_amount
+-2.01
+alter table t add column serial_id varchar(24) default '2022111700196920';
+select
+/*+ inl_join(c) */
+c.occur_amount
+from
+c
+join t on c.txt_account_id = t.txn_account_id
+and t.broker = '0009'
+and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id;
+occur_amount
+-2.01
+explain select
+/*+ inl_join(c) */
+c.occur_amount
+from
+c
+join t on c.txt_account_id = t.txn_account_id
+and t.broker = '0009'
+and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id;
+id estRows task access object operator info
+IndexJoin_20 0.80 root inner join, inner:TableReader_19, outer key:executor__partition__issues.t.txn_account_id, executor__partition__issues.t.serial_id, inner key:executor__partition__issues.c.txt_account_id, executor__partition__issues.c.serial_id, equal cond:eq(executor__partition__issues.t.serial_id, executor__partition__issues.c.serial_id), eq(executor__partition__issues.t.txn_account_id, executor__partition__issues.c.txt_account_id)
+├─TableReader_25(Build) 0.80 root data:Selection_24
+│ └─Selection_24 0.80 cop[tikv] eq(executor__partition__issues.t.broker, "0009"), not(isnull(executor__partition__issues.t.serial_id))
+│   └─TableFullScan_23 1.00 cop[tikv] table:t keep order:false
+└─TableReader_19(Probe) 0.80 root partition:all data:Selection_18
+  └─Selection_18 0.80 cop[tikv] eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)
+    └─TableRangeScan_17 0.80 cop[tikv] table:c range: decided by [eq(executor__partition__issues.c.txt_account_id, executor__partition__issues.t.txn_account_id) eq(executor__partition__issues.c.serial_id, executor__partition__issues.t.serial_id) eq(executor__partition__issues.c.occur_trade_date, 2022-11-17 00:00:00.000000)], keep order:false
+set @@tidb_opt_advanced_join_hint=default;
+set tidb_partition_prune_mode=default;
diff --git a/tests/integrationtest/r/executor/partition/partition_boundaries.result b/tests/integrationtest/r/executor/partition/partition_boundaries.result
new file mode 100644
index 0000000000000..6c4c9152c8eda
--- /dev/null
+++ b/tests/integrationtest/r/executor/partition/partition_boundaries.result
@@ -0,0 +1,5256 @@
+SET @@tidb_partition_prune_mode = 'dynamic';
+DROP TABLE IF EXISTS t;
+CREATE TABLE t
+(a INT, b varchar(255))
+PARTITION BY RANGE (a) (
+PARTITION p0 VALUES LESS THAN (1000000),
+PARTITION p1 VALUES LESS THAN (2000000),
+PARTITION p2 VALUES LESS THAN (3000000));
+INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...');
+INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...');
+INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...');
+INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...');
+ANALYZE TABLE t;
+explain format='brief' SELECT * FROM t WHERE a = -2147483648;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, -2147483648)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = -2147483648;
+a b
+-2147483648 MIN_INT filler...
+explain format='brief' SELECT * FROM t WHERE a IN (-2147483648);
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, -2147483648)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (-2147483648);
+a b
+-2147483648 MIN_INT filler...
+explain format='brief' SELECT * FROM t WHERE a = 0;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 0)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 0;
+a b
+0 0 Filler...
+explain format='brief' SELECT * FROM t WHERE a IN (0);
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 0)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (0);
+a b
+0 0 Filler...
+explain format='brief' SELECT * FROM t WHERE a = 999998;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999998)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 999998;
+a b
+999998 999998 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (999998);
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999998)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (999998);
+a b
+999998 999998 Filler ...
+explain format='brief' SELECT * FROM t WHERE a = 999999;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999999)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 999999;
+a b
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (999999);
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 999999)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (999999);
+a b
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a = 1000000;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 1000000;
+a b
+1000000 1000000 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1000000);
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1000000);
+a b
+1000000 1000000 Filler ...
+explain format='brief' SELECT * FROM t WHERE a = 1000001;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 1000001;
+a b
+1000001 1000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1000001);
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1000001);
+a b
+1000001 1000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a = 1000002;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000002)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a = 1000002;
+a b
+1000002 1000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1000002);
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 1000002)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1000002);
+a b
+1000002 1000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a = 3000000;
+id estRows task access object operator info
+TableDual 0.00 root rows:0
+SELECT * FROM t WHERE a = 3000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a IN (3000000);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 3000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (3000000);
+a b
+explain format='brief' SELECT * FROM t WHERE a = 3000001;
+id estRows task access object operator info
+TableDual 0.00 root rows:0
+SELECT * FROM t WHERE a = 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a IN (3000001);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] eq(executor__partition__partition_boundaries.t.a, 3000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (3000001);
+a b
+explain format='brief' SELECT * FROM t WHERE a IN (-2147483648, -2147483647);
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, -2147483648, -2147483647)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (-2147483648, -2147483647);
+a b
+-2147483648 MIN_INT filler...
+explain format='brief' SELECT * FROM t WHERE a IN (-2147483647, -2147483646);
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, -2147483647, -2147483646)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (-2147483647, -2147483646);
+a b
+explain format='brief' SELECT * FROM t WHERE a IN (999997, 999998, 999999);
+id estRows task access object operator info
+TableReader 2.00 root partition:p0 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999997, 999998, 999999)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (999997, 999998, 999999);
+a b
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (999998, 999999, 1000000);
+id estRows task access object operator info
+TableReader 3.00 root partition:p0,p1 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999998, 999999, 1000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (999998, 999999, 1000000);
+a b
+1000000 1000000 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (999999, 1000000, 1000001);
+id estRows task access object operator info
+TableReader 3.00 root partition:p0,p1 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 999999, 1000000, 1000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (999999, 1000000, 1000001);
+a b
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002);
+id estRows task access object operator info
+TableReader 3.00 root partition:p1 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1000000, 1000001, 1000002)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002);
+a b
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999);
+id estRows task access object operator info
+TableReader 2.00 root partition:p1 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999997, 1999998, 1999999)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999);
+a b
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000);
+id estRows task access object operator info
+TableReader 3.00 root partition:p1,p2 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999998, 1999999, 2000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000);
+a b
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001);
+id estRows task access object operator info
+TableReader 3.00 root partition:p1,p2 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 1999999, 2000000, 2000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001);
+a b
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002);
+id estRows task access object operator info
+TableReader 3.00 root partition:p2 data:Selection
+└─Selection 3.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2000000, 2000001, 2000002)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002);
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999);
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999997, 2999998, 2999999)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999);
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000);
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999998, 2999999, 3000000)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000);
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001);
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 2999999, 3000000, 3000001)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001);
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] in(executor__partition__partition_boundaries.t.a, 3000000, 3000001, 3000002)
+  └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002);
+a b
+SET @@tidb_partition_prune_mode = default;
+SET @@tidb_partition_prune_mode = 'dynamic';
+DROP TABLE IF EXISTS t;
+CREATE TABLE t
+(a INT, b varchar(255))
+PARTITION BY RANGE (a) (
+PARTITION p0 VALUES LESS THAN (1),
+PARTITION p1 VALUES LESS THAN (2),
+PARTITION p2 VALUES LESS THAN (3),
+PARTITION p3 VALUES LESS THAN (4),
+PARTITION p4 VALUES LESS THAN (5),
+PARTITION p5 VALUES LESS THAN (6),
+PARTITION p6 VALUES LESS THAN (7));
+INSERT INTO t VALUES (0, '0 Filler...');
+INSERT INTO t VALUES (1, '1 Filler...');
+INSERT INTO t VALUES (2, '2 Filler...');
+INSERT INTO t VALUES (3, '3 Filler...');
+INSERT INTO t VALUES (4, '4 Filler...');
+INSERT INTO t VALUES (5, '5 Filler...');
+INSERT INTO t VALUES (6, '6 Filler...');
+ANALYZE TABLE t;
+explain format='brief' SELECT * FROM t WHERE a != -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a != -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 1 AND a != -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a NOT IN (-2, -1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] or(0, eq(executor__partition__partition_boundaries.t.a, -1))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 0 OR a = -1;
+a b
+explain format='brief' SELECT * FROM t WHERE a != 0;
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 0)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a != 0;
+a b
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0;
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0;
+a b
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0);
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a NOT IN (-2, -1, 0);
+a b
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0)))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0;
+a b
+0 0 Filler...
+explain format='brief' SELECT * FROM t WHERE a != 1;
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 1)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a != 1;
+a b
+0 0 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1;
+id estRows task access object operator info
+TableReader 5.00 root partition:all data:Selection
+└─Selection 5.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1;
+a b
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1);
+id estRows task access object operator info
+TableReader 5.00 root partition:all data:Selection
+└─Selection 5.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1);
+a b
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1)))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1;
+a b
+0 0 Filler...
+1 1 Filler...
+explain format='brief' SELECT * FROM t WHERE a != 2;
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 2)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a != 2;
+a b
+0 0 Filler...
+1 1 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2;
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2;
+a b
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2);
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2);
+a b
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2;
+id estRows task access object operator info
+TableReader 3.00 root partition:p0,p1,p2 data:Selection
+└─Selection 3.00 cop[tikv] or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2))))
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+explain format='brief' SELECT * FROM t WHERE a != 3;
+id estRows task access object operator info
+TableReader 6.00 root partition:all data:Selection
+└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 3)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a != 3;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3;
+id estRows task access object operator info
+TableReader 3.00 root partition:all data:Selection
+└─Selection 3.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3)
+  └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3;
+a b
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +id estRows task access object operator info +TableReader 3.00 root partition:all data:Selection +└─Selection 3.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] or(or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0))), or(eq(executor__partition__partition_boundaries.t.a, 1), or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE a != 4; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] or(or(0, or(eq(executor__partition__partition_boundaries.t.a, -1), eq(executor__partition__partition_boundaries.t.a, 0))), or(or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2)), or(eq(executor__partition__partition_boundaries.t.a, 3), eq(executor__partition__partition_boundaries.t.a, 4)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +a b +0 0 Filler... +1 1 Filler... 
+2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a != 5; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +id estRows task access object operator info +TableReader 1.00 root partition:all data:Selection +└─Selection 1.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +id estRows task access object operator info +TableReader 1.00 root partition:all data:Selection +└─Selection 1.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1))), or(or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)), or(eq(executor__partition__partition_boundaries.t.a, 4), eq(executor__partition__partition_boundaries.t.a, 5)))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a != 6; +id estRows task access object operator info +TableReader 6.00 root partition:all data:Selection +└─Selection 6.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5), ne(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +a b +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4, 5, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +a b +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), eq(executor__partition__partition_boundaries.t.a, 1))), or(or(eq(executor__partition__partition_boundaries.t.a, 2), eq(executor__partition__partition_boundaries.t.a, 3)), or(eq(executor__partition__partition_boundaries.t.a, 4), or(eq(executor__partition__partition_boundaries.t.a, 5), eq(executor__partition__partition_boundaries.t.a, 6))))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a != 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a != 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] ne(executor__partition__partition_boundaries.t.a, -1), ne(executor__partition__partition_boundaries.t.a, 0), ne(executor__partition__partition_boundaries.t.a, 1), ne(executor__partition__partition_boundaries.t.a, 2), ne(executor__partition__partition_boundaries.t.a, 3), ne(executor__partition__partition_boundaries.t.a, 4), ne(executor__partition__partition_boundaries.t.a, 5), ne(executor__partition__partition_boundaries.t.a, 6), ne(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +a b +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +id estRows task access object operator info +TableReader 0.00 root partition:all data:Selection +└─Selection 0.00 cop[tikv] not(in(executor__partition__partition_boundaries.t.a, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +a b +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(or(or(0, eq(executor__partition__partition_boundaries.t.a, -1)), or(eq(executor__partition__partition_boundaries.t.a, 0), or(eq(executor__partition__partition_boundaries.t.a, 1), eq(executor__partition__partition_boundaries.t.a, 2)))), or(or(eq(executor__partition__partition_boundaries.t.a, 3), eq(executor__partition__partition_boundaries.t.a, 4)), or(eq(executor__partition__partition_boundaries.t.a, 5), or(eq(executor__partition__partition_boundaries.t.a, 6), eq(executor__partition__partition_boundaries.t.a, 7))))) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+SET @@tidb_partition_prune_mode = default; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483649) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483647) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483646) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2147483638) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +a b +-2147483648 MIN_INT filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483650) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483649) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483647) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648), le(executor__partition__partition_boundaries.t.a, -2146483646) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND -1; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, -1) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND -1; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 0; +a b +0 0 Filler... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 2; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 2; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 10; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 10) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 10; +a b +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +a b +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +a b +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +a b +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +a b +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +a b +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000008) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999996) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... 
+1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +a b +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000009) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +id estRows task access object operator info +TableReader 8.00 root partition:all data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +a b +1000000 1000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000010) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +id estRows task access object operator info +TableReader 4.00 root partition:p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +id estRows task access object operator info +TableReader 5.00 root partition:p1 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... 
+2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +id estRows task access object operator info +TableReader 0.00 root partition:p1 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1000011) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +id estRows task access object operator info +TableReader 4.00 root partition:p1 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +id estRows task access object operator info +TableReader 5.00 root partition:p1,p2 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001), le(executor__partition__partition_boundaries.t.a, 2000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +id estRows task access object operator info +TableReader 0.00 root partition:p1 data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +a b +1000002 1000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000004) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 1000012) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +a b +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +id estRows task access object operator info +TableReader 4.00 root partition:p1,p2 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +id estRows task access object operator info +TableReader 5.00 root partition:p1,p2 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000003) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004;
+id estRows task access object operator info
+TableReader 6.00 root partition:p1,p2 data:Selection
+└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002), le(executor__partition__partition_boundaries.t.a, 2000004)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004;
+a b
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000010)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3999998)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 4000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000003)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3000011)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 3999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001), le(executor__partition__partition_boundaries.t.a, 4000003)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003;
+a b
+DROP TABLE IF EXISTS t;
+CREATE TABLE t
+(a INT, b varchar(255))
+PARTITION BY RANGE (a) (
+PARTITION p0 VALUES LESS THAN (1),
+PARTITION p1 VALUES LESS THAN (2),
+PARTITION p2 VALUES LESS THAN (3),
+PARTITION p3 VALUES LESS THAN (4),
+PARTITION p4 VALUES LESS THAN (5),
+PARTITION p5 VALUES LESS THAN (6),
+PARTITION p6 VALUES LESS THAN (7));
+INSERT INTO t VALUES (0, '0 Filler...');
+INSERT INTO t VALUES (1, '1 Filler...');
+INSERT INTO t VALUES (2, '2 Filler...');
+INSERT INTO t VALUES (3, '3 Filler...');
+INSERT INTO t VALUES (4, '4 Filler...');
+INSERT INTO t VALUES (5, '5 Filler...');
+INSERT INTO t VALUES (6, '6 Filler...');
+ANALYZE TABLE t;
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a BETWEEN 2 AND -1;
+a b
+explain format='brief' SELECT * FROM t WHERE a BETWEEN -1 AND 4;
+id estRows task access object operator info
+TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -1), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN -1 AND 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 0; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 0; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 0 AND 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 1; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 1; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1 AND 4; +id estRows task access object operator info +TableReader 4.00 root partition:p1,p2,p3,p4 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 1 AND 4; +a b +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 2; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 3; +a b +2 2 Filler... +3 3 Filler... 
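With exactly one row per partition in this seven-partition table, the pruned partition list mirrors the closed value interval one-to-one, and a reversed interval (low above high) collapses to partition:dual. A short sketch against the table t defined above:
-- contiguous interval maps to a contiguous partition list: expect partition:p3,p4,p5
explain format='brief' SELECT * FROM t WHERE a BETWEEN 3 AND 5;
-- reversed bounds can match nothing: expect partition:dual
explain format='brief' SELECT * FROM t WHERE a BETWEEN 5 AND 3;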
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 3 AND 4; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 3 AND 4; +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 4 AND 4; +id estRows task access object operator info +TableReader 1.00 root partition:p4 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 4), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 4 AND 4; +a b +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 5; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 5 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 5), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 5 AND 4; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 6; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a BETWEEN 6 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 6), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 6 AND 4; +a b +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 2 AND 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
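An upper bound at or past the last LESS THAN value is effectively clamped: BETWEEN 2 AND 7 above reads the same p2 through p6 and returns the same rows as BETWEEN 2 AND 6. A sketch of the clamping, assuming the same table:
-- bound far past the last partition: still expect partition:p2,p3,p4,p5,p6
explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 100;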
+explain format='brief' SELECT * FROM t WHERE a BETWEEN 7 AND 4; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 7), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a BETWEEN 7 AND 4; +a b +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( +PARTITION p0 VALUES LESS THAN (1000000), +PARTITION p1 VALUES LESS THAN (2000000), +PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -2147483648; +id estRows task access object operator info +TableReader 0.00 root partition:p0 data:Selection +└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < -2147483648; +a b +explain format='brief' SELECT * FROM t WHERE a > -2147483648; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > -2147483648; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= -2147483648; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= -2147483648; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a >= -2147483648; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -2147483648) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= -2147483648; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
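The block above switches to dynamic prune mode and rebuilds t with a MIN_INT row; under dynamic mode a single TableReader carries the whole pruned list (partition:p0, partition:all, and so on) instead of one reader per partition. A minimal sketch of the toggle, assuming the rebuilt table t above; the exact static-mode plan shape varies by TiDB version:
set @@tidb_partition_prune_mode = 'static';
-- static mode: typically a union of per-partition readers
explain format='brief' SELECT * FROM t WHERE a <= 0;
set @@tidb_partition_prune_mode = 'dynamic';
-- dynamic mode: one TableReader with partition:p0, as recorded above
explain format='brief' SELECT * FROM t WHERE a <= 0;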
+explain format='brief' SELECT * FROM t WHERE a < 0; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 0; +a b +-2147483648 MIN_INT filler... +explain format='brief' SELECT * FROM t WHERE a > 0; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 0; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 0; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 0; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 0; +id estRows task access object operator info +TableReader 13.00 root partition:all data:Selection +└─Selection 13.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 0; +a b +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a > 999998; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999998; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 999998; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999999; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999999; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... 
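The four comparison operators pair up at each boundary: at the exact boundary value, < keeps the reader below it (a < 1000000 reads only p0) while <= crosses it (a <= 1000000 adds p1), and > / >= mirror this from the other side. A sketch with a value in the middle of p1, which the recorded queries do not use, assuming the same table:
-- 1500000 lies inside p1, so the two halves split as p0,p1 / p1,p2
explain format='brief' SELECT * FROM t WHERE a < 1500000;
explain format='brief' SELECT * FROM t WHERE a >= 1500000;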
+explain format='brief' SELECT * FROM t WHERE a > 1000000; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000001; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000001; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000001; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000001; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a >= 1000001; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000001; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000002; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1 data:Selection +└─Selection 6.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000002; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000002; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000002; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000002; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000002; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000002; +id estRows task access object operator info +TableReader 8.00 root partition:p1,p2 data:Selection +└─Selection 8.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000002; +a b +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 3000000; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000000; +a b +explain format='brief' SELECT * FROM t WHERE a < 3000001; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a <= 3000001; +id estRows task access object operator info +TableReader 14.00 root partition:all data:Selection +└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3000001; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
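Because the top partition is declared LESS THAN (3000000), no stored value can reach 3000000; any predicate confined to that region prunes every partition (partition:dual), while its complement keeps partition:all, as the plans above record. The same holds for a point lookup, assuming the same table:
-- no partition can hold this value: expect partition:dual
explain format='brief' SELECT * FROM t WHERE a = 3000000;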
+explain format='brief' SELECT * FROM t WHERE a >= 3000001; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3000001; +a b +explain format='brief' SELECT * FROM t WHERE a < 999997; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a > 999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999997; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 999997; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999997; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... 
+explain format='brief' SELECT * FROM t WHERE a > 999997 AND a < 999999; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a < 999999; +a b +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999997), le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +a b +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999998; +id estRows task access object operator info +TableReader 2.00 root partition:p0 data:Selection +└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +explain format='brief' SELECT * FROM t WHERE a > 999998; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999998; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999998; +id estRows task access object operator info +TableReader 12.00 root partition:all data:Selection +└─Selection 12.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999998 999998 Filler ... 
+999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a < 1000000; +id estRows task access object operator info +TableReader 1.00 root partition:p0 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a < 1000000; +a b +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999998), le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +a b +1000000 1000000 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 999999; +id estRows task access object operator info +TableReader 3.00 root partition:p0 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 999999; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 999999; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999999; +id estRows task access object operator info +TableReader 11.00 root partition:all data:Selection +└─Selection 11.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... 
+2999999 2999999 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a < 1000001; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), lt(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a < 1000001; +a b +1000000 1000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 999999), le(executor__partition__partition_boundaries.t.a, 1000001) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1000000; +id estRows task access object operator info +TableReader 4.00 root partition:p0 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000; +id estRows task access object operator info +TableReader 9.00 root partition:p1,p2 data:Selection +└─Selection 9.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1000000; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... 
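Tightening the lower bound by one step can drop a whole partition: a >= 999999 must read p0 for the row 999999, but a > 999999 starts at 1000000, so the interval up to 1000001 fits entirely in p1. The pair, restated from the plans above:
-- expect partition:p0,p1
explain format='brief' SELECT * FROM t WHERE a >= 999999 AND a <= 1000001;
-- expect partition:p1
explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001;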
+explain format='brief' SELECT * FROM t WHERE a >= 1000000; +id estRows task access object operator info +TableReader 10.00 root partition:p1,p2 data:Selection +└─Selection 10.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 3.00 root partition:p1 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +a b +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), lt(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +a b +1000001 1000001 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1000000), le(executor__partition__partition_boundaries.t.a, 1000002) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +a b +1000001 1000001 Filler ... +1000002 1000002 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... 
+2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1999997; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999997; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999997) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999997; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), lt(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +a b +1999998 1999998 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +id estRows task access object operator info +TableReader 2.00 root partition:p1 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999997), le(executor__partition__partition_boundaries.t.a, 1999999) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a < 1999998; +id estRows task access object operator info +TableReader 7.00 root partition:p0,p1 data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 1999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... 
+1000001 1000001 Filler ... +1000002 1000002 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998; +id estRows task access object operator info +TableReader 6.00 root partition:p1,p2 data:Selection +└─Selection 6.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a <= 1999998; +id estRows task access object operator info +TableReader 8.00 root partition:p0,p1 data:Selection +└─Selection 8.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 1999998; +a b +-2147483648 MIN_INT filler... +0 0 Filler... +1000000 1000000 Filler ... +1000001 1000001 Filler ... +1000002 1000002 Filler ... +1999998 1999998 Filler ... +999998 999998 Filler ... +999999 999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999998; +id estRows task access object operator info +TableReader 7.00 root partition:p1,p2 data:Selection +└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999998) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999998; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +2000001 2000001 Filler ... +2000002 2000002 Filler ... +2999998 2999998 Filler ... +2999999 2999999 Filler ... +explain format='brief' SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +id estRows task access object operator info +TableReader 3.00 root partition:p1,p2 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +a b +1999998 1999998 Filler ... +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +id estRows task access object operator info +TableReader 2.00 root partition:p1,p2 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +a b +1999999 1999999 Filler ... +2000000 2000000 Filler ... +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +id estRows task access object operator info +TableReader 1.00 root partition:p1 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), lt(executor__partition__partition_boundaries.t.a, 2000000) + └─TableFullScan 14.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +a b +1999999 1999999 Filler ... 
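The same one-step effect applies on the upper side: an open interval that stops just short of a boundary never touches the next partition, so a > 1999998 AND a < 2000000 reads only p1, while the inclusive upper bound adds p2. Condensed from the plans above:
-- expect partition:p1
explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a < 2000000;
-- expect partition:p1,p2
explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000;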
+explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000;
+id estRows task access object operator info
+TableReader 2.00 root partition:p1,p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999998), le(executor__partition__partition_boundaries.t.a, 2000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1999998 AND a <= 2000000;
+a b
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+explain format='brief' SELECT * FROM t WHERE a < 1999999;
+id estRows task access object operator info
+TableReader 8.00 root partition:p0,p1 data:Selection
+└─Selection 8.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 1999999;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 1999999;
+id estRows task access object operator info
+TableReader 5.00 root partition:p2 data:Selection
+└─Selection 5.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1999999;
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a <= 1999999;
+id estRows task access object operator info
+TableReader 9.00 root partition:p0,p1 data:Selection
+└─Selection 9.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 1999999;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 1999999;
+id estRows task access object operator info
+TableReader 6.00 root partition:p1,p2 data:Selection
+└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 1999999;
+a b
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001;
+id estRows task access object operator info
+TableReader 3.00 root partition:p1,p2 data:Selection
+└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001;
+a b
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1999999 AND a <= 2000001;
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a < 2000001;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), lt(executor__partition__partition_boundaries.t.a, 2000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1999999 AND a < 2000001;
+a b
+2000000 2000000 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1999999), le(executor__partition__partition_boundaries.t.a, 2000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1999999 AND a <= 2000001;
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a < 2000000;
+id estRows task access object operator info
+TableReader 9.00 root partition:p0,p1 data:Selection
+└─Selection 9.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2000000;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2000000;
+id estRows task access object operator info
+TableReader 4.00 root partition:p2 data:Selection
+└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2000000;
+a b
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a <= 2000000;
+id estRows task access object operator info
+TableReader 10.00 root partition:all data:Selection
+└─Selection 10.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2000000;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2000000;
+id estRows task access object operator info
+TableReader 5.00 root partition:p2 data:Selection
+└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2000000;
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002;
+id estRows task access object operator info
+TableReader 3.00 root partition:p2 data:Selection
+└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002;
+a b
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2000000 AND a <= 2000002;
+a b
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a < 2000002;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), lt(executor__partition__partition_boundaries.t.a, 2000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2000000 AND a < 2000002;
+a b
+2000001 2000001 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2000000), le(executor__partition__partition_boundaries.t.a, 2000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2000000 AND a <= 2000002;
+a b
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+explain format='brief' SELECT * FROM t WHERE a < 2999997;
+id estRows task access object operator info
+TableReader 12.00 root partition:all data:Selection
+└─Selection 12.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999997)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2999997;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999997;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999997;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a <= 2999997;
+id estRows task access object operator info
+TableReader 12.00 root partition:all data:Selection
+└─Selection 12.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999997)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2999997;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999997;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999997)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999997;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999997 AND a <= 2999999;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a < 2999999;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), lt(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999997 AND a < 2999999;
+a b
+2999998 2999998 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999997), le(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999997 AND a <= 2999999;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a < 2999998;
+id estRows task access object operator info
+TableReader 12.00 root partition:all data:Selection
+└─Selection 12.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999998)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2999998;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999998;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999998;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a <= 2999998;
+id estRows task access object operator info
+TableReader 13.00 root partition:all data:Selection
+└─Selection 13.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999998)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2999998;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999998;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999998)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999998;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000;
+a b
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999998 AND a <= 3000000;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a < 3000000;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), lt(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999998 AND a < 3000000;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999998), le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999998 AND a <= 3000000;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a < 2999999;
+id estRows task access object operator info
+TableReader 13.00 root partition:all data:Selection
+└─Selection 13.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2999999;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999999;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999999;
+a b
+explain format='brief' SELECT * FROM t WHERE a <= 2999999;
+id estRows task access object operator info
+TableReader 14.00 root partition:all data:Selection
+└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2999999;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999999;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999999)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999999;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001;
+a b
+2999999 2999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999999 AND a <= 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a < 3000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), lt(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999999 AND a < 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2999999), le(executor__partition__partition_boundaries.t.a, 3000001)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2999999 AND a <= 3000001;
+a b
+explain format='brief' SELECT * FROM t WHERE a < 3000000;
+id estRows task access object operator info
+TableReader 14.00 root partition:all data:Selection
+└─Selection 14.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 3000000;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a > 3000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 3000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a <= 3000000;
+id estRows task access object operator info
+TableReader 14.00 root partition:all data:Selection
+└─Selection 14.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 3000000;
+a b
+-2147483648 MIN_INT filler...
+0 0 Filler...
+1000000 1000000 Filler ...
+1000001 1000001 Filler ...
+1000002 1000002 Filler ...
+1999998 1999998 Filler ...
+1999999 1999999 Filler ...
+2000000 2000000 Filler ...
+2000001 2000001 Filler ...
+2000002 2000002 Filler ...
+2999998 2999998 Filler ...
+2999999 2999999 Filler ...
+999998 999998 Filler ...
+999999 999999 Filler ...
+explain format='brief' SELECT * FROM t WHERE a >= 3000000;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 3000000;
+a b
+explain format='brief' SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 3000000 AND a <= 3000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a < 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), lt(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 3000000 AND a < 3000002;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3000000), le(executor__partition__partition_boundaries.t.a, 3000002)
+ └─TableFullScan 14.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 3000000 AND a <= 3000002;
+a b
+set @@tidb_partition_prune_mode = default;
+set @@tidb_partition_prune_mode = 'dynamic';
+drop table if exists t;
+CREATE TABLE t
+(a INT, b varchar(255))
+PARTITION BY RANGE (a) (
+PARTITION p0 VALUES LESS THAN (1),
+PARTITION p1 VALUES LESS THAN (2),
+PARTITION p2 VALUES LESS THAN (3),
+PARTITION p3 VALUES LESS THAN (4),
+PARTITION p4 VALUES LESS THAN (5),
+PARTITION p5 VALUES LESS THAN (6),
+PARTITION p6 VALUES LESS THAN (7));
+INSERT INTO t VALUES (0, '0 Filler...');
+INSERT INTO t VALUES (1, '1 Filler...');
+INSERT INTO t VALUES (2, '2 Filler...');
+INSERT INTO t VALUES (3, '3 Filler...');
+INSERT INTO t VALUES (4, '4 Filler...');
+INSERT INTO t VALUES (5, '5 Filler...');
+INSERT INTO t VALUES (6, '6 Filler...');
+ANALYZE TABLE t;
+explain format='brief' SELECT * FROM t WHERE a < -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < -1;
+a b
+explain format='brief' SELECT * FROM t WHERE a > -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= -1;
+a b
+explain format='brief' SELECT * FROM t WHERE a >= -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a > -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a > -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a < -1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > -1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a > -1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < -1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a < -1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a >= -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a < -1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= -1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a >= -1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < -1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a < -1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a > -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a <= -1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > -1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a > -1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= -1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a <= -1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= -1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a >= -1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, -1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a <= -1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, -1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 0;
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 0;
+a b
+explain format='brief' SELECT * FROM t WHERE a > 0;
+id estRows task access object operator info
+TableReader 6.00 root partition:p1,p2,p3,p4,p5,p6 data:Selection
+└─Selection 6.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 0;
+a b
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 0;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 0;
+a b
+0 0 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 0;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 0;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 0;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a > 0;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 0;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a < 0;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 0);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a > 0);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 0);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a < 0);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 0;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a >= 0;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 0;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a < 0;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 0);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a >= 0);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 0);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a < 0);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 0;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a > 0;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 0;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a <= 0;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 0);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a > 0);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 0);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a <= 0);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 0;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a >= 0;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 0;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 0)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a <= 0;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 0))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 1;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 1;
+a b
+0 0 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 1;
+id estRows task access object operator info
+TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection
+└─Selection 5.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 1;
+a b
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 1;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 1;
+a b
+0 0 Filler...
+1 1 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 1;
+id estRows task access object operator info
+TableReader 6.00 root partition:p1,p2,p3,p4,p5,p6 data:Selection
+└─Selection 6.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 1;
+a b
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a > 1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a < 1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a > 1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a < 1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a >= 1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a < 1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a >= 1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a < 1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a > 1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a <= 1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a > 1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a <= 1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 1;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a >= 1;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 1;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 1)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a <= 1;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 1))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2;
+a b
+0 0 Filler...
+1 1 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2;
+id estRows task access object operator info
+TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection
+└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2;
+a b
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2;
+id estRows task access object operator info
+TableReader 3.00 root partition:p0,p1,p2 data:Selection
+└─Selection 3.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2;
+id estRows task access object operator info
+TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection
+└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2;
+a b
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 2;
+id estRows task access object operator info
+TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection
+└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a > 2;
+a b
+0 0 Filler...
+1 1 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 2;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a < 2;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 2);
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a > 2);
+a b
+2 2 Filler...
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 2);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a < 2);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 2;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a < 2 OR a >= 2;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 2;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a >= 2 AND a < 2;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 2);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a < 2 OR a >= 2);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 2);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a >= 2 AND a < 2);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 2;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a <= 2 OR a > 2;
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 2;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2)
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE a > 2 AND a <= 2;
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 2);
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a <= 2 OR a > 2);
+a b
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 2);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2))
+ └─TableFullScan 7.00 cop[tikv] table:t keep order:false
+SELECT * FROM t WHERE NOT (a > 2 AND a <= 2);
+a b
+0 0 Filler...
+1 1 Filler...
+2 2 Filler...
+3 3 Filler...
+4 4 Filler...
+5 5 Filler...
+6 6 Filler...
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 2; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 2; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 2; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 2) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 2; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 2)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 3; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a > 3; +id estRows task access object operator info +TableReader 3.00 root partition:p4,p5,p6 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 3; +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 3; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 3; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 3; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 3; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 3; +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 3; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 3; +a b +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 3; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 3; +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 3; +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 3; +a b +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +id estRows task access object operator info +TableReader 1.00 root partition:p2 data:Selection +└─Selection 1.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +a b +2 2 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p3,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +a b +0 0 Filler... +1 1 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 3; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 3; +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 3; +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 3; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 3; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 3; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 3) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 3; +a b +2 2 Filler... +3 3 Filler... 
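The Selection operator info shows how NOT is normalized before pruning: NOT (a <= 2 OR a > 3) is rewritten by De Morgan's laws into and(gt(a, 2), le(a, 3)), so it prunes exactly like the equivalent explicit range. Both statements below therefore yield the same plan (sketch):

    explain format='brief' select * from t where not (a <= 2 or a > 3);
    explain format='brief' select * from t where a > 2 and a <= 3;
    -- both prune to partition:p3 with equivalent
    -- gt(a, 2) / le(a, 3) conjuncts in the Selection operator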
+explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +a b +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 3)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 4; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p3 data:Selection +└─Selection 4.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE a > 4; +id estRows task access object operator info +TableReader 2.00 root partition:p5,p6 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 4; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 4; +id estRows task access object operator info +TableReader 3.00 root partition:p4,p5,p6 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 4; +a b +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 4; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 4; +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 4; +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 4; +a b +3 3 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 4; +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 4; +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 4; +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +id estRows task access object operator info +TableReader 2.00 root partition:p2,p3 data:Selection +└─Selection 2.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +a b +2 2 Filler... +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +a b +0 0 Filler... +1 1 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 4; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... 
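Throughout these hunks, estRows equals the true number of matching rows because analyze table collected complete statistics on a seven-row table; on production-sized data the column is only an estimate. Re-collecting stats and checking the stored row count is straightforward (a sketch; SHOW STATS_META is TiDB-specific):

    analyze table t;
    show stats_meta where table_name = 't';
    -- row_count should read 7 for the reconstructed table above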
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 4; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 4; +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 4; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p4,p5,p6 data:Selection +└─Selection 6.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 4; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 4; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 4) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 4; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +id estRows task access object operator info +TableReader 1.00 root partition:p3 data:Selection +└─Selection 1.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +a b +3 3 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 4)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a < 5; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p3,p4 data:Selection +└─Selection 5.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE a > 5; +id estRows task access object operator info +TableReader 1.00 root partition:p6 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 5; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 5; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 5; +id estRows task access object operator info +TableReader 2.00 root partition:p5,p6 data:Selection +└─Selection 2.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 5; +a b +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 5; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 5; +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 5; +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 5; +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... 
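The < versus <= asymmetry at a boundary follows directly from the half-open intervals: partition pN holds [N, N+1), so a < 5 needs only p0 through p4, while a <= 5 must also read p5 (sketch):

    explain format='brief' select * from t where a < 5;
    -- partition:p0,p1,p2,p3,p4
    explain format='brief' select * from t where a <= 5;
    -- partition:p0,p1,p2,p3,p4,p5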
+explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 5; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 5; +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 5; +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +id estRows task access object operator info +TableReader 3.00 root partition:p2,p3,p4 data:Selection +└─Selection 3.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +a b +0 0 Filler... +1 1 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 5; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 5; +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 5; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 5; +id estRows task access object operator info +TableReader 5.00 root partition:p0,p1,p2,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 5; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 5; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 5) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 5; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +id estRows task access object operator info +TableReader 2.00 root partition:p3,p4 data:Selection +└─Selection 2.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +a b +3 3 Filler... +4 4 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 5)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 6; +id estRows task access object operator info +TableReader 6.00 root partition:p0,p1,p2,p3,p4,p5 data:Selection +└─Selection 6.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE a > 6; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 6; +a b +explain format='brief' SELECT * FROM t WHERE a <= 6; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 6; +id estRows task access object operator info +TableReader 1.00 root partition:p6 data:Selection +└─Selection 1.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 6; +a b +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 6; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 6; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 6; +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 6; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 6; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 6; +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 6; +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +id estRows task access object operator info +TableReader 4.00 root partition:p2,p3,p4,p5 data:Selection +└─Selection 4.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p6 data:Selection +└─Selection 3.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +a b +0 0 Filler... +1 1 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 6; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 6; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 6; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 6; +id estRows task access object operator info +TableReader 4.00 root partition:p0,p1,p2,p6 data:Selection +└─Selection 4.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 6; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 6; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 6) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 6; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +id estRows task access object operator info +TableReader 3.00 root partition:p3,p4,p5 data:Selection +└─Selection 3.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 6)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a < 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a > 7; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 7; +a b +explain format='brief' SELECT * FROM t WHERE a <= 7; +id estRows task access object operator info +TableReader 7.00 root partition:all data:Selection +└─Selection 7.00 cop[tikv] le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 7; +id estRows task access object operator info +TableReader 0.00 root partition:dual data:Selection +└─Selection 0.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 7; +a b +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 7; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a > 7; +a b +0 0 Filler... +1 1 Filler... 
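Constants at or beyond the highest boundary (7, the upper limit of p6) collapse the same way: a > 7 and a >= 7 can match nothing and prune to partition:dual, while a < 7 and a <= 7 cover the whole stored domain and prune to partition:all (sketch):

    explain format='brief' select * from t where a > 7;
    -- partition:dual  (beyond the last partition boundary)
    explain format='brief' select * from t where a <= 7;
    -- partition:all   (the predicate covers every stored value)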
+explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 7; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a < 7; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 7; +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a < 2 OR a >= 7; +a b +0 0 Filler... +1 1 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a < 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] and(ge(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +a b +0 0 Filler... +1 1 Filler... 
+explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 7; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a > 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 7; +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a > 2 AND a <= 7; +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 7; +id estRows task access object operator info +TableReader 3.00 root partition:p0,p1,p2 data:Selection +└─Selection 3.00 cop[tikv] or(le(executor__partition__partition_boundaries.t.a, 2), ge(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a <= 2 OR a >= 7; +a b +0 0 Filler... +1 1 Filler... +2 2 Filler... +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 7; +id estRows task access object operator info +TableReader 5.00 root partition:p2,p3,p4,p5,p6 data:Selection +└─Selection 5.00 cop[tikv] ge(executor__partition__partition_boundaries.t.a, 2), le(executor__partition__partition_boundaries.t.a, 7) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE a >= 2 AND a <= 7; +a b +2 2 Filler... +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +id estRows task access object operator info +TableReader 4.00 root partition:p3,p4,p5,p6 data:Selection +└─Selection 4.00 cop[tikv] and(gt(executor__partition__partition_boundaries.t.a, 2), lt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +a b +3 3 Filler... +4 4 Filler... +5 5 Filler... +6 6 Filler... 
+explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +id estRows task access object operator info +TableReader 2.00 root partition:p0,p1 data:Selection +└─Selection 2.00 cop[tikv] or(lt(executor__partition__partition_boundaries.t.a, 2), gt(executor__partition__partition_boundaries.t.a, 7)) + └─TableFullScan 7.00 cop[tikv] table:t keep order:false +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +a b +0 0 Filler... +1 1 Filler... +set @@tidb_partition_prune_mode = default; diff --git a/tests/integrationtest/r/executor/partition/partition_with_expression.result b/tests/integrationtest/r/executor/partition/partition_with_expression.result new file mode 100644 index 0000000000000..5435f6baeb059 --- /dev/null +++ b/tests/integrationtest/r/executor/partition/partition_with_expression.result @@ -0,0 +1,1250 @@ +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00")); +insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +insert into t values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3); +analyze table tp; +analyze table t; +explain format='brief' select * from tp where a != '2024-01-01 01:01:01'; +id estRows task access object operator info +TableReader 2.00 root partition:all data:Selection +└─Selection 2.00 cop[tikv] ne(executor__partition__partition_with_expression.tp.a, 2024-01-01 01:01:01.000000) + └─TableFullScan 3.00 cop[tikv] table:tp keep order:false +select * from tp where a != '2024-01-01 01:01:01'; +a b +2015-09-09 00:00:00 1 +2020-08-08 19:00:01 2 +select * from t where a != '2024-01-01 01:01:01'; +a b +2015-09-09 00:00:00 1 +2020-08-08 19:00:01 2 +explain format='brief' select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +id estRows task access object operator info +TableReader 1.00 root partition:p1,p2 data:Selection +└─Selection 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2015-09-09 00:00:00.000000), ne(executor__partition__partition_with_expression.tp.a, 2024-01-01 01:01:01.000000) + └─TableFullScan 3.00 cop[tikv] table:tp keep order:false +select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +a b +2020-08-08 19:00:01 2 +select * from t where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00'; +a b +2020-08-08 19:00:01 2 +set tidb_partition_prune_mode=default; +drop table if exists tp, t; +set tidb_partition_prune_mode='dynamic'; +create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8)); +create table t(a datetime, b int); +insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0); +insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0); +analyze table tp; 
+analyze table t;
+explain format='brief' select * from tp where a = '2020-08-17 00:00:00';
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.tp.a, 2020-08-17 00:00:00.000000)
+ └─TableFullScan 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a = '2020-08-17 00:00:00';
+a b
+2020-08-17 00:00:00 1
+select * from t where a = '2020-08-17 00:00:00';
+a b
+2020-08-17 00:00:00 1
+explain format='brief' select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.tp.a, 2020-08-20 00:00:00.000000)
+ └─TableFullScan 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+a b
+2020-08-20 00:00:00 5
+select * from t where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+a b
+2020-08-20 00:00:00 5
+explain format='brief' select * from tp where a < '2020-08-19 00:00:00';
+id estRows task access object operator info
+TableReader 2.00 root partition:all data:Selection
+└─Selection 2.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-08-19 00:00:00.000000)
+ └─TableFullScan 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a < '2020-08-19 00:00:00';
+a b
+2020-08-17 00:00:00 1
+2020-08-18 00:00:00 2
+select * from t where a < '2020-08-19 00:00:00';
+a b
+2020-08-17 00:00:00 1
+2020-08-18 00:00:00 2
+set tidb_partition_prune_mode=default;
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400));
+create table t(a timestamp, b int);
+insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+analyze table tp;
+analyze table t;
+explain select * from tp where a > '2020-09-11 00:00:00';
+id estRows task access object operator info
+TableReader_7 1.00 root partition:p2 data:Selection_6
+└─Selection_6 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-09-11 00:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a > '2020-09-11 00:00:00';
+a b
+2020-10-01 14:13:15 3
+select * from t where a > '2020-09-11 00:00:00';
+a b
+2020-10-01 14:13:15 3
+explain select * from tp where a < '2020-07-07 01:00:00';
+id estRows task access object operator info
+TableReader_7 1.00 root partition:p0,p1 data:Selection_6
+└─Selection_6 1.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a < '2020-07-07 01:00:00';
+a b
+2020-01-01 19:00:00 1
+select * from t where a < '2020-07-07 01:00:00';
+a b
+2020-01-01 19:00:00 1
+set tidb_partition_prune_mode=default;
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a timestamp, b int) partition by range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400));
+create table t(a timestamp, b int);
+insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+analyze table tp;
+analyze table t;
+explain select * from tp where a > '2020-09-11 00:00:00';
+id estRows task access object operator info
+TableReader_7 1.00 root partition:p2 data:Selection_6
+└─Selection_6 1.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-09-11 00:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a > '2020-09-11 00:00:00';
+a b
+2020-10-01 14:13:15 3
+select * from t where a > '2020-09-11 00:00:00';
+a b
+2020-10-01 14:13:15 3
+explain select * from tp where a < '2020-07-07 01:00:00';
+id estRows task access object operator info
+TableReader_7 1.00 root partition:p0,p1 data:Selection_6
+└─Selection_6 1.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a < '2020-07-07 01:00:00';
+a b
+2020-01-01 19:00:00 1
+select * from t where a < '2020-07-07 01:00:00';
+a b
+2020-01-01 19:00:00 1
+set tidb_partition_prune_mode=default;
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00'));
+create table t(a datetime, b int);
+insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4);
+insert into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4);
+analyze table tp;
+analyze table t;
+explain select * from tp where a < '2020-09-01 00:00:00';
+id estRows task access object operator info
+TableReader_7 2.00 root partition:p0,p1 data:Selection_6
+└─Selection_6 2.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, 2020-09-01 00:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a < '2020-09-01 00:00:00';
+a b
+2020-01-01 12:00:00 1
+2020-08-22 10:00:00 2
+select * from t where a < '2020-09-01 00:00:00';
+a b
+2020-01-01 12:00:00 1
+2020-08-22 10:00:00 2
+explain select * from tp where a > '2020-07-07 01:00:00';
+id estRows task access object operator info
+TableReader_7 3.00 root partition:p1,p2 data:Selection_6
+└─Selection_6 3.00 cop[tikv] gt(executor__partition__partition_with_expression.tp.a, 2020-07-07 01:00:00.000000)
+ └─TableFullScan_5 4.00 cop[tikv] table:tp keep order:false
+select * from tp where a > '2020-07-07 01:00:00';
+a b
+2020-08-22 10:00:00 2
+2020-09-09 11:00:00 3
+2020-10-01 00:00:00 4
+select * from t where a > '2020-07-07 01:00:00';
+a b
+2020-08-22 10:00:00 2
+2020-09-09 11:00:00 3
+2020-10-01 00:00:00 4
+set tidb_partition_prune_mode=default;
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm'));
+create table t(a varchar(255), b int);
+insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6);
+insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6);
+analyze table tp;
+analyze table t;
+explain select * from tp where a < '10';
+id estRows task access object operator info
+TableReader_7 0.00 root partition:p0 data:Selection_6
+└─Selection_6 0.00 cop[tikv] lt(executor__partition__partition_with_expression.tp.a, "10")
+ └─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a < '10';
+a b
+select * from t where a < '10';
+a b
+explain select * from tp where a > 0;
+id estRows task access object operator info
+TableReader_7 4.80 root partition:all data:Selection_6
+└─Selection_6 4.80 cop[tikv] gt(cast(executor__partition__partition_with_expression.tp.a, double BINARY), 0)
+ └─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a > 0;
+a b
+10 6
+select * from t where a > 0;
+a b
+10 6
+explain select * from tp where a < 0;
+id estRows task access object operator info
+TableReader_7 4.80 root partition:all data:Selection_6
+└─Selection_6 4.80 cop[tikv] lt(cast(executor__partition__partition_with_expression.tp.a, double BINARY), 0)
+ └─TableFullScan_5 6.00 cop[tikv] table:tp keep order:false
+select * from tp where a < 0;
+a b
+select * from t where a < 0;
+a b
+set tidb_partition_prune_mode=default;
+drop table if exists trange, thash, t;
+create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));
+create table thash(a int, b int) partition by hash(a) partitions 4;
+create table t(a int, b int);
+insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);
+insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);
+insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1);
+set session tidb_partition_prune_mode='dynamic';
+analyze table trange;
+analyze table thash;
+analyze table t;
+SELECT * from t where a = 2;
+a b
+2 1
+explain format='brief' select * from trange where a = 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2;
+a b
+2 1
+explain format='brief' select * from thash where a = 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2;
+a b
+2 1
+SELECT * from t where a = 4 or a = 1;
+a b
+1 NULL
+1 NULL
+1 1
+4 3
+explain format='brief' select * from trange where a = 4 or a = 1;
+id estRows task access object operator info
+TableReader 4.00 root partition:p0,p1 data:Selection
+└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 4), eq(executor__partition__partition_with_expression.trange.a, 1))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 4 or a = 1;
+a b
+1 NULL
+1 NULL
+1 1
+4 3
+explain format='brief' select * from thash where a = 4 or a = 1;
+id estRows task access object operator info
+TableReader 4.00 root partition:p0,p1 data:Selection
+└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 4), eq(executor__partition__partition_with_expression.thash.a, 1))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 4 or a = 1;
+a b
+1 NULL
+1 NULL
+1 1
+4 3
+SELECT * from t where a = -1;
+a b
+explain format='brief' select * from trange where a = -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:p0 data:Selection
+└─Selection 0.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, -1)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = -1;
+a b
+explain format='brief' select * from thash where a = -1;
+id estRows task access object operator info
+TableReader 0.00 root partition:p1 data:Selection
+└─Selection 0.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, -1)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = -1;
+a b
+SELECT * from t where a is NULL;
+a b
+NULL NULL
+NULL 1
+explain format='brief' select * from trange where a is NULL;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0 data:Selection
+└─Selection 2.00 cop[tikv] isnull(executor__partition__partition_with_expression.trange.a)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a is NULL;
+a b
+NULL NULL
+NULL 1
+explain format='brief' select * from thash where a is NULL;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0 data:Selection
+└─Selection 2.00 cop[tikv] isnull(executor__partition__partition_with_expression.thash.a)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a is NULL;
+a b
+NULL NULL
+NULL 1
+SELECT * from t where b is NULL;
+a b
+NULL NULL
+1 NULL
+1 NULL
+10 NULL
+explain format='brief' select * from trange where b is NULL;
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] isnull(executor__partition__partition_with_expression.trange.b)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where b is NULL;
+a b
+NULL NULL
+1 NULL
+1 NULL
+10 NULL
+explain format='brief' select * from thash where b is NULL;
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] isnull(executor__partition__partition_with_expression.thash.b)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where b is NULL;
+a b
+NULL NULL
+1 NULL
+1 NULL
+10 NULL
+SELECT * from t where a > -1;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a > -1;
+id estRows task access object operator info
+TableReader 11.00 root partition:all data:Selection
+└─Selection 11.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, -1)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a > -1;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a > -1;
+id estRows task access object operator info
+TableReader 11.00 root partition:all data:Selection
+└─Selection 11.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, -1)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a > -1;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a >= 4 and a <= 5;
+a b
+4 3
+5 5
+explain format='brief' select * from trange where a >= 4 and a <= 5;
+id estRows task access object operator info
+TableReader 2.00 root partition:p1,p2 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 4), le(executor__partition__partition_with_expression.trange.a, 5)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a >= 4 and a <= 5;
+a b
+4 3
+5 5
+explain format='brief' select * from thash where a >= 4 and a <= 5;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 4), le(executor__partition__partition_with_expression.thash.a, 5)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a >= 4 and a <= 5;
+a b
+4 3
+5 5
+SELECT * from t where a > 10;
+a b
+explain format='brief' select * from trange where a > 10;
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a > 10;
+a b
+explain format='brief' select * from thash where a > 10;
+id estRows task access object operator info
+TableReader 0.00 root partition:all data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a > 10;
+a b
+SELECT * from t where a >=2 and a <= 3;
+a b
+2 1
+3 2
+explain format='brief' select * from trange where a >=2 and a <= 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a >=2 and a <= 3;
+a b
+2 1
+3 2
+explain format='brief' select * from thash where a >=2 and a <= 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2,p3 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a >=2 and a <= 3;
+a b
+2 1
+3 2
+SELECT * from t where a between 2 and 3;
+a b
+2 1
+3 2
+explain format='brief' select * from trange where a between 2 and 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a between 2 and 3;
+a b
+2 1
+3 2
+explain format='brief' select * from thash where a between 2 and 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2,p3 data:Selection
+└─Selection 2.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a between 2 and 3;
+a b
+2 1
+3 2
+SELECT * from t where a < 2;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from trange where a < 2;
+id estRows task access object operator info
+TableReader 3.00 root partition:p0 data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a < 2;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from thash where a < 2;
+id estRows task access object operator info
+TableReader 3.00 root partition:all data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a < 2;
+a b
+1 NULL
+1 NULL
+1 1
+SELECT * from t where a <= 3;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+3 2
+explain format='brief' select * from trange where a <= 3;
+id estRows task access object operator info
+TableReader 5.00 root partition:p0,p1 data:Selection
+└─Selection 5.00 cop[tikv] le(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a <= 3;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+3 2
+explain format='brief' select * from thash where a <= 3;
+id estRows task access object operator info
+TableReader 5.00 root partition:all data:Selection
+└─Selection 5.00 cop[tikv] le(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a <= 3;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+3 2
+SELECT * from t where a in (2, 3);
+a b
+2 1
+3 2
+explain format='brief' select * from trange where a in (2, 3);
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_with_expression.trange.a, 2, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a in (2, 3);
+a b
+2 1
+3 2
+explain format='brief' select * from thash where a in (2, 3);
+id estRows task access object operator info
+TableReader 2.00 root partition:p2,p3 data:Selection
+└─Selection 2.00 cop[tikv] in(executor__partition__partition_with_expression.thash.a, 2, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a in (2, 3);
+a b
+2 1
+3 2
+SELECT * from t where a in (1, 5);
+a b
+1 NULL
+1 NULL
+1 1
+5 5
+explain format='brief' select * from trange where a in (1, 5);
+id estRows task access object operator info
+TableReader 4.00 root partition:p0,p2 data:Selection
+└─Selection 4.00 cop[tikv] in(executor__partition__partition_with_expression.trange.a, 1, 5)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a in (1, 5);
+a b
+1 NULL
+1 NULL
+1 1
+5 5
+explain format='brief' select * from thash where a in (1, 5);
+id estRows task access object operator info
+TableReader 4.00 root partition:p1 data:Selection
+└─Selection 4.00 cop[tikv] in(executor__partition__partition_with_expression.thash.a, 1, 5)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a in (1, 5);
+a b
+1 NULL
+1 NULL
+1 1
+5 5
+SELECT * from t where a not in (1, 5);
+a b
+10 NULL
+2 1
+3 2
+4 3
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a not in (1, 5);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_with_expression.trange.a, 1, 5))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a not in (1, 5);
+a b
+10 NULL
+2 1
+3 2
+4 3
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a not in (1, 5);
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] not(in(executor__partition__partition_with_expression.thash.a, 1, 5))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a not in (1, 5);
+a b
+10 NULL
+2 1
+3 2
+4 3
+6 7
+7 7
+7 7
+SELECT * from t where a = 2 and a = 2;
+a b
+2 1
+explain format='brief' select * from trange where a = 2 and a = 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 and a = 2;
+a b
+2 1
+explain format='brief' select * from thash where a = 2 and a = 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 and a = 2;
+a b
+2 1
+SELECT * from t where a = 2 and a = 3;
+a b
+explain format='brief' select * from trange where a = 2 and a = 3;
+id estRows task access object operator info
+TableDual 0.00 root rows:0
+SELECT * from trange where a = 2 and a = 3;
+a b
+explain format='brief' select * from thash where a = 2 and a = 3;
+id estRows task access object operator info
+TableDual 0.00 root rows:0
+SELECT * from thash where a = 2 and a = 3;
+a b
+SELECT * from t where a < 2 and a > 0;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from trange where a < 2 and a > 0;
+id estRows task access object operator info
+TableReader 3.00 root partition:p0 data:Selection
+└─Selection 3.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 0), lt(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a < 2 and a > 0;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from thash where a < 2 and a > 0;
+id estRows task access object operator info
+TableReader 3.00 root partition:p1 data:Selection
+└─Selection 3.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 0), lt(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a < 2 and a > 0;
+a b
+1 NULL
+1 NULL
+1 1
+SELECT * from t where a < 2 and a < 3;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from trange where a < 2 and a < 3;
+id estRows task access object operator info
+TableReader 3.00 root partition:p0 data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.trange.a, 2), lt(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a < 2 and a < 3;
+a b
+1 NULL
+1 NULL
+1 1
+explain format='brief' select * from thash where a < 2 and a < 3;
+id estRows task access object operator info
+TableReader 3.00 root partition:all data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__partition_with_expression.thash.a, 2), lt(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a < 2 and a < 3;
+a b
+1 NULL
+1 NULL
+1 1
+SELECT * from t where a > 1 and a > 2;
+a b
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a > 1 and a > 2;
+id estRows task access object operator info
+TableReader 7.00 root partition:p1,p2 data:Selection
+└─Selection 7.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 1), gt(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a > 1 and a > 2;
+a b
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a > 1 and a > 2;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 1), gt(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a > 1 and a > 2;
+a b
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a = 2 or a = 3;
+a b
+2 1
+3 2
+explain format='brief' select * from trange where a = 2 or a = 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), eq(executor__partition__partition_with_expression.trange.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 or a = 3;
+a b
+2 1
+3 2
+explain format='brief' select * from thash where a = 2 or a = 3;
+id estRows task access object operator info
+TableReader 2.00 root partition:p2,p3 data:Selection
+└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), eq(executor__partition__partition_with_expression.thash.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 or a = 3;
+a b
+2 1
+3 2
+SELECT * from t where a = 2 or a in (3);
+a b
+2 1
+3 2
+explain format='brief' select * from trange where a = 2 or a in (3);
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), eq(executor__partition__partition_with_expression.trange.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 or a in (3);
+a b
+2 1
+3 2
+explain format='brief' select * from thash where a = 2 or a in (3);
+id estRows task access object operator info
+TableReader 2.00 root partition:p2,p3 data:Selection
+└─Selection 2.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), eq(executor__partition__partition_with_expression.thash.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 or a in (3);
+a b
+2 1
+3 2
+SELECT * from t where a = 2 or a > 3;
+a b
+10 NULL
+2 1
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a = 2 or a > 3;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), gt(executor__partition__partition_with_expression.trange.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 or a > 3;
+a b
+10 NULL
+2 1
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a = 2 or a > 3;
+id estRows task access object operator info
+TableReader 7.00 root partition:all data:Selection
+└─Selection 7.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), gt(executor__partition__partition_with_expression.thash.a, 3))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 or a > 3;
+a b
+10 NULL
+2 1
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a = 2 or a <= 1;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+explain format='brief' select * from trange where a = 2 or a <= 1;
+id estRows task access object operator info
+TableReader 4.00 root partition:p0 data:Selection
+└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 1))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 or a <= 1;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+explain format='brief' select * from thash where a = 2 or a <= 1;
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 1))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 or a <= 1;
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+SELECT * from t where a = 2 or a between 2 and 2;
+a b
+2 1
+explain format='brief' select * from trange where a = 2 or a between 2 and 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] or(eq(executor__partition__partition_with_expression.trange.a, 2), and(ge(executor__partition__partition_with_expression.trange.a, 2), le(executor__partition__partition_with_expression.trange.a, 2)))
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a = 2 or a between 2 and 2;
+a b
+2 1
+explain format='brief' select * from thash where a = 2 or a between 2 and 2;
+id estRows task access object operator info
+TableReader 1.00 root partition:p2 data:Selection
+└─Selection 1.00 cop[tikv] or(eq(executor__partition__partition_with_expression.thash.a, 2), and(ge(executor__partition__partition_with_expression.thash.a, 2), le(executor__partition__partition_with_expression.thash.a, 2)))
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a = 2 or a between 2 and 2;
+a b
+2 1
+SELECT * from t where a != 2;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a != 2;
+id estRows task access object operator info
+TableReader 10.00 root partition:all data:Selection
+└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a != 2;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a != 2;
+id estRows task access object operator info
+TableReader 10.00 root partition:all data:Selection
+└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a != 2;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a != 2 and a > 4;
+a b
+10 NULL
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a != 2 and a > 4;
+id estRows task access object operator info
+TableReader 5.00 root partition:p2 data:Selection
+└─Selection 5.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 4), ne(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a != 2 and a > 4;
+a b
+10 NULL
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a != 2 and a > 4;
+id estRows task access object operator info
+TableReader 5.00 root partition:all data:Selection
+└─Selection 5.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 4), ne(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a != 2 and a > 4;
+a b
+10 NULL
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a != 2 and a != 3;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a != 2 and a != 3;
+id estRows task access object operator info
+TableReader 9.00 root partition:all data:Selection
+└─Selection 9.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2), ne(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a != 2 and a != 3;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a != 2 and a != 3;
+id estRows task access object operator info
+TableReader 9.00 root partition:all data:Selection
+└─Selection 9.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2), ne(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a != 2 and a != 3;
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a != 2 and a = 3;
+a b
+3 2
+explain format='brief' select * from trange where a != 2 and a = 3;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.trange.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a != 2 and a = 3;
+a b
+3 2
+explain format='brief' select * from thash where a != 2 and a = 3;
+id estRows task access object operator info
+TableReader 1.00 root partition:p3 data:Selection
+└─Selection 1.00 cop[tikv] eq(executor__partition__partition_with_expression.thash.a, 3)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a != 2 and a = 3;
+a b
+3 2
+SELECT * from t where not (a = 2);
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where not (a = 2);
+id estRows task access object operator info
+TableReader 10.00 root partition:all data:Selection
+└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where not (a = 2);
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where not (a = 2);
+id estRows task access object operator info
+TableReader 10.00 root partition:all data:Selection
+└─Selection 10.00 cop[tikv] ne(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where not (a = 2);
+a b
+1 NULL
+1 NULL
+1 1
+10 NULL
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where not (a > 2);
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+explain format='brief' select * from trange where not (a > 2);
+id estRows task access object operator info
+TableReader 4.00 root partition:p0 data:Selection
+└─Selection 4.00 cop[tikv] le(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where not (a > 2);
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+explain format='brief' select * from thash where not (a > 2);
+id estRows task access object operator info
+TableReader 4.00 root partition:all data:Selection
+└─Selection 4.00 cop[tikv] le(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where not (a > 2);
+a b
+1 NULL
+1 NULL
+1 1
+2 1
+SELECT * from t where not (a < 2);
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where not (a < 2);
+id estRows task access object operator info
+TableReader 8.00 root partition:all data:Selection
+└─Selection 8.00 cop[tikv] ge(executor__partition__partition_with_expression.trange.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where not (a < 2);
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where not (a < 2);
+id estRows task access object operator info
+TableReader 8.00 root partition:all data:Selection
+└─Selection 8.00 cop[tikv] ge(executor__partition__partition_with_expression.thash.a, 2)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where not (a < 2);
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a + 1 > 4;
+a b
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a + 1 > 4;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] gt(plus(executor__partition__partition_with_expression.trange.a, 1), 4)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a + 1 > 4;
+a b
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a + 1 > 4;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] gt(plus(executor__partition__partition_with_expression.thash.a, 1), 4)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a + 1 > 4;
+a b
+10 NULL
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a - 1 > 0;
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from trange where a - 1 > 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] gt(minus(executor__partition__partition_with_expression.trange.a, 1), 0)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a - 1 > 0;
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+explain format='brief' select * from thash where a - 1 > 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] gt(minus(executor__partition__partition_with_expression.thash.a, 1), 0)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a - 1 > 0;
+a b
+10 NULL
+2 1
+3 2
+4 3
+5 5
+6 7
+7 7
+7 7
+SELECT * from t where a * 2 < 0;
+a b
+explain format='brief' select * from trange where a * 2 < 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] lt(mul(executor__partition__partition_with_expression.trange.a, 2), 0)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a * 2 < 0;
+a b
+explain format='brief' select * from thash where a * 2 < 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] lt(mul(executor__partition__partition_with_expression.thash.a, 2), 0)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a * 2 < 0;
+a b
+SELECT * from t where a << 1 < 0;
+a b
+explain format='brief' select * from trange where a << 1 < 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] lt(leftshift(executor__partition__partition_with_expression.trange.a, 1), 0)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a << 1 < 0;
+a b
+explain format='brief' select * from thash where a << 1 < 0;
+id estRows task access object operator info
+TableReader 10.40 root partition:all data:Selection
+└─Selection 10.40 cop[tikv] lt(leftshift(executor__partition__partition_with_expression.thash.a, 1), 0)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a << 1 < 0;
+a b
+SELECT * from t where a > '10';
+a b
+explain format='brief' select * from trange where a > '10';
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a > '10';
+a b
+explain format='brief' select * from thash where a > '10';
+id estRows task access object operator info
+TableReader 0.00 root partition:all data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a > '10';
+a b
+SELECT * from t where a > '10ab';
+a b
+explain format='brief' select * from trange where a > '10ab';
+id estRows task access object operator info
+TableReader 0.00 root partition:dual data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.trange.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:trange keep order:false
+SELECT * from trange where a > '10ab';
+a b
+explain format='brief' select * from thash where a > '10ab';
+id estRows task access object operator info
+TableReader 0.00 root partition:all data:Selection
+└─Selection 0.00 cop[tikv] gt(executor__partition__partition_with_expression.thash.a, 10)
+ └─TableFullScan 13.00 cop[tikv] table:thash keep order:false
+SELECT * from thash where a > '10ab';
+a b
+set tidb_partition_prune_mode=default;
diff --git a/tests/integrationtest/r/executor/partition/table.result b/tests/integrationtest/r/executor/partition/table.result
new file mode 100644
index 0000000000000..99c99010a747a
--- /dev/null
+++ b/tests/integrationtest/r/executor/partition/table.result
@@ -0,0 +1,545 @@
+set @@session.tidb_partition_prune_mode = DEFAULT;
+show warnings;
+Level Code Message
+set @@global.tidb_partition_prune_mode = DEFAULT;
+show warnings;
+Level Code Message
+Warning 1105 Please analyze all partition tables again for consistency between partition and global stats
+select @@global.tidb_partition_prune_mode;
+@@global.tidb_partition_prune_mode
+dynamic
+select @@session.tidb_partition_prune_mode;
+@@session.tidb_partition_prune_mode
+dynamic
+set @@session.tidb_partition_prune_mode = "static";
+show warnings;
+Level Code Message
+set @@global.tidb_partition_prune_mode = "static";
+show warnings;
+Level Code Message
+select @@session.tidb_partition_prune_mode;
+@@session.tidb_partition_prune_mode
+static
+show warnings;
+Level Code Message
+select @@global.tidb_partition_prune_mode;
+@@global.tidb_partition_prune_mode
+static
+set @@session.tidb_partition_prune_mode = "dynamic";
+show warnings;
+Level Code Message
+Warning 1105 Please analyze all partition tables again for consistency between partition and global stats
+Warning 1105 Please avoid setting partition prune mode to dynamic at session level and set partition prune mode to dynamic at global level
+set @@global.tidb_partition_prune_mode = "dynamic";
+show warnings;
+Level Code Message
+Warning 1105 Please analyze all partition tables again for consistency between partition and global stats
+select @@global.tidb_partition_prune_mode;
+@@global.tidb_partition_prune_mode
+dynamic
+select @@session.tidb_partition_prune_mode;
+@@session.tidb_partition_prune_mode
+dynamic
+set @@session.tidb_partition_prune_mode = DEFAULT;
+set @@global.tidb_partition_prune_mode = DEFAULT;
+drop table if exists pt;
+create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
+partition p0 values less than (4),
+partition p1 values less than (7),
+partition p2 values less than (10));
+analyze table pt;
+insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null);
+select * from pt;
+id c
+NULL NULL
+0 0
+2 2
+4 4
+6 6
+7 7
+9 9
+select * from pt where c > 10;
+id c
+select * from pt where c > 8;
+id c
+9 9
+select * from pt where c < 2 or c >= 9;
+id c
+0 0
+9 9
+select c from pt;
+c
+NULL
+0
+2
+4
+6
+7
+9
+select c from pt where c > 10;
+c
+select c from pt where c > 8;
+c
+9
+select c from pt where c < 2 or c >= 9;
+c
+0
+9
+select /*+ use_index(pt, i_id) */ * from pt;
+id c
+NULL NULL
+0 0
+2 2
+4 4
+6 6
+7 7
+9 9
+select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10;
+id c
+select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8;
+id c
+9 9
+select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9;
+id c
+0 0
+9 9
+set @@tidb_enable_index_merge = 1;
+select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7;
+id c
+0 0
+2 2
+4 4
+6 6
+set @@tidb_enable_index_merge = DEFAULT;
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
+partition p0 values less than (4),
+partition p1 values less than (7),
+partition p2 values less than (10));
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+id c id
+4 4 4
+9 9 9
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+id
+4
+9
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) (
+partition p0 values in (1,2,3,4),
+partition p1 values in (5,6,7),
+partition p2 values in (8, 9,10));
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+id c id
+4 4 4
+9 9 9
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+id
+4
+9
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5;
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+id c id
+4 4 4
+9 9 9
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+id
+4
+9
+drop table if exists t1, t2;
+create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue);
+create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4;
+insert into t1 values (10, 'interesting neumann');
+insert into t2 select * from t1;
+begin;
+insert into t2 values (11, 'hopeful hoover');
+select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11);
+c_int c_str c_int c_str
+10 interesting neumann 10 interesting neumann
+select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11);
+c_int c_str c_int c_str
+10 interesting neumann 10 interesting neumann
+commit;
+drop table if exists t;
+create table t(c_int int);
+insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9);
+DROP TABLE IF EXISTS `t1`;
+CREATE TABLE t1 (
+c_int int NOT NULL,
+c_str varchar(40) NOT NULL,
+c_datetime datetime NOT NULL,
+c_timestamp timestamp NULL DEFAULT NULL,
+c_double double DEFAULT NULL,
+c_decimal decimal(12,6) DEFAULT NULL,
+PRIMARY KEY (c_int,c_str,c_datetime)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci
+PARTITION BY RANGE (c_int)
+(PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB,
+PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB,
+PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB,
+PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB,
+PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB,
+PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB,
+PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB,
+PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB);
+INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000);
+begin;
+insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088);
+select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int;
+c_int
+1
+2
+3
+4
+5
+6
+7
+8
+9
+rollback;
+drop table if exists t1, t2;
+create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int));
+create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4;
+insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524);
+insert into t2 select * from t1;
+select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str);
+c_int c_str c_decimal
+1 romantic robinson 4.436000
+2 stoic chaplygin 9.826000
+3 vibrant shamir 6.300000
+4 hungry wilson 4.900000
+5 naughty swartz 9.524000
+set @@tidb_partition_prune_mode='static';
+select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str);
+c_int c_str c_decimal
+1 romantic robinson 4.436000
+2 stoic chaplygin 9.826000
+3 vibrant shamir 6.300000
+4 hungry wilson 4.900000
+5 naughty swartz 9.524000
+set @@tidb_partition_prune_mode=default;
+drop table if exists coverage_rr, coverage_dt;
+create table coverage_rr (
+pk1 varchar(35) NOT NULL,
+pk2 int NOT NULL,
+c int,
+PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;
+create table coverage_dt (pk1 varchar(35), pk2 int);
+insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1);
+insert into coverage_dt values ('apple',3),('ios',3),('linux',5);
+set @@tidb_partition_prune_mode = 'dynamic';
+select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);
+pk1 pk2 pk1 pk2 c
+ios 3 ios 3 2
+linux 5 linux 5 1
+select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);
+pk1 pk2 pk1 pk2 c
+ios 3 ios 3 2
+linux 5 linux 5 1
+set @@tidb_partition_prune_mode = default;
+drop table if exists tunsigned_hash;
+create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6;
+insert into tunsigned_hash values(25), (9279808998424041135);
+select min(a) from tunsigned_hash;
+min(a)
+25
+select max(a) from tunsigned_hash;
+max(a)
+9279808998424041135
+drop table if exists t, t1;
+create table t (id int not null, store_id int not null )partition by range (store_id)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21));
+create table t1(id int not null, store_id int not null);
+insert into t values (1, 1);
+insert into t values (2, 17);
+insert into t1 values (0, 18);
+alter table t exchange partition p3 with table t1;
+alter table t add index idx(id);
+analyze table t;
+select *,_tidb_rowid from t use index(idx) order by id limit 2;
+id store_id _tidb_rowid
+0 18 1
+1 1 1
+drop table t, t1;
+create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))partition by range (b)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21));
+create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c));
+insert into t values (1,2,3), (2,3,4), (3,4,5);
+insert into t1 values (1,18,3);
+alter table t exchange partition p3 with table t1;
+analyze table t;
+select * from t where a = 1 or b = 5 order by c limit 2;
+a b c
+1 18 3
+1 2 3
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b) order by b, _tidb_rowid limit 10;
+a b c
+2 -1 3
+1 1 1
+3 2 2
+analyze table t;
+select * from t use index(idx_b) order by b, _tidb_rowid limit 10;
+a b c
+2 -1 3
+1 1 1
+3 2 2
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,primary key(`a`),KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b) order by b, a limit 10;
+a b c
+2 -1 3
+1 1 1
+3 2 2
+analyze table t;
+select * from t use index(idx_b) order by b, a limit 10;
+a b c
+2 -1 3
+1 1 1
+3 2 2
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;
+a b c
+3 2 2
+1 1 1
+analyze table t;
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;
+a b c
+3 2 2
+1 1 1
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`),PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;
+a b c
+1 1 1
+3 2 2
+analyze table t;
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;
+a b c
+1 1 1
+3 2 2
+drop table if exists trange, thash;
+create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) (
+partition p0 values less than(300),
+partition p1 values less than(500),
+partition p2 values less than(1100));
+create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4;
+analyze table thash, trange;
+explain format='brief' select * from trange where a>400;
+id estRows task access object operator info
+TableReader 3333.33 root partition:p1,p2 data:TableRangeScan
+└─TableRangeScan 3333.33 cop[tikv] table:trange range:(400,+inf], keep order:false, stats:pseudo
+explain format='brief' select * from thash where a>=100;
+id estRows task access object operator info
+TableReader 3333.33 root partition:all data:TableRangeScan
+└─TableRangeScan 3333.33 cop[tikv] table:thash range:[100,+inf], keep order:false, stats:pseudo
+drop table if exists t;
+set @@tidb_partition_prune_mode = 'dynamic';
+create table t(a int) partition by range(a) (
+partition p0 values less than (5),
+partition p1 values less than (10),
+partition p2 values less than (15));
+insert into t values (2), (7), (12);
+analyze table t;
+explain format='brief' select * from t where a < 3;
+id estRows task access object operator info
+TableReader 1.00 root partition:p0 data:Selection
+└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 3;
+a
+2
+explain format='brief' select * from t where a < 8;
+id estRows task access object operator info
+TableReader 2.00 root partition:p0,p1 data:Selection
+└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 8;
+a
+2
+7
+explain format='brief' select * from t where a < 20;
+id estRows task access object operator info
+TableReader 3.00 root partition:all data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 20;
+a
+12
+2
+7
+alter table t drop partition p0;
+explain format='brief' select * from t where a < 3;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 3;
+a
+explain format='brief' select * from t where a < 8;
+id estRows task access object operator info
+TableReader 2.00 root partition:p1 data:Selection
+└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 8;
+a
+7
+explain format='brief' select * from t where a < 20;
+id estRows task access object operator info
+TableReader 3.00 root partition:all data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 20;
+a
+12
+7
+alter table t add partition (partition p3 values less than (20));
+alter table t add partition (partition p4 values less than (40));
+insert into t values (15), (25);
+explain format='brief' select * from t where a < 3;
+id estRows task access object operator info
+TableReader 1.00 root partition:p1 data:Selection
+└─Selection 1.00 cop[tikv] lt(executor__partition__table.t.a, 3)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 3;
+a
+explain format='brief' select * from t where a < 8;
+id estRows task access object operator info
+TableReader 2.00 root partition:p1 data:Selection
+└─Selection 2.00 cop[tikv] lt(executor__partition__table.t.a, 8)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 8;
+a
+7
+explain format='brief' select * from t where a < 20;
+id estRows task access object operator info
+TableReader 3.00 root partition:p1,p2,p3 data:Selection
+└─Selection 3.00 cop[tikv] lt(executor__partition__table.t.a, 20)
+ └─TableFullScan 3.00 cop[tikv] table:t keep order:false
+select * from t where a < 20;
+a
+12
+15
+7
+drop table if exists t;
+create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));
+analyze table t;
+set @@tidb_partition_prune_mode = 'static';
+begin;
+explain format='brief' select * from t;
+id estRows task access object operator info
+PartitionUnion 30000.00 root
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p0 keep order:false, stats:pseudo
+├─TableReader 10000.00 root data:TableFullScan
+│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p1 keep order:false, stats:pseudo
+└─TableReader 10000.00 root data:TableFullScan
+ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo
+select * from t;
+a b
+explain format='brief' select * from t where a > 3;
+id estRows task access object operator info
+PartitionUnion 6666.67 root
+├─TableReader 3333.33 root data:Selection
+│ └─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3)
+│ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p1 keep order:false, stats:pseudo
+└─TableReader 3333.33 root data:Selection
+ └─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3)
+ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo
+select * from t where a > 3;
+a b
+explain format='brief' select * from t where a > 7;
+id estRows task access object operator info
+TableReader 3333.33 root data:Selection
+└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 7)
+ └─TableFullScan 10000.00 cop[tikv] table:t, partition:p2 keep order:false, stats:pseudo
+select * from t where a > 7;
+a b
+rollback;
+set @@tidb_partition_prune_mode = 'dynamic';
+begin;
+explain format='brief' select * from t;
+id estRows task access object operator info
+TableReader 10000.00 root partition:all data:TableFullScan
+└─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
+select * from t;
+a b
+explain format='brief' select * from t where a > 3;
+id estRows task access object operator info
+TableReader 3333.33 root partition:p1,p2 data:Selection
+└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 3)
+ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
+select * from t where a > 3;
+a b
+explain format='brief' select * from t where a > 7;
+id estRows task access object operator info
+TableReader 3333.33 root partition:p2 data:Selection
+└─Selection 3333.33 cop[tikv] gt(executor__partition__table.t.a, 7)
+ └─TableFullScan 10000.00 cop[tikv] table:t keep order:false, stats:pseudo
+select * from t where a > 7;
+a b
+rollback;
+set @@tidb_partition_prune_mode = default;
+drop table if exists tt1, tt2;
+set global tidb_partition_prune_mode='dynamic';
+set session tidb_partition_prune_mode='dynamic';
+CREATE TABLE tt1 (
+id INT NOT NULL,
+listid INT,
+name varchar(10),
+primary key (listid) clustered
+)
+PARTITION BY LIST (listid) (
+PARTITION p1 VALUES IN (1),
+PARTITION p2 VALUES IN (2),
+PARTITION p3 VALUES IN (3),
+PARTITION p4 VALUES IN (4)
+);
+CREATE TABLE tt2 (
+id INT NOT NULL,
+listid INT
+);
+create index idx_listid on tt1(id,listid);
+create index idx_listid on tt2(listid);
+insert into tt1 values(1,1,1);
+insert into tt1 values(2,2,2);
+insert into tt1 values(3,3,3);
+insert into tt1 values(4,4,4);
+insert into tt2 values(1,1);
+insert into tt2 values(2,2);
+insert into tt2 values(3,3);
+insert into tt2 values(4,4);
+insert into tt2 values(5,5);
+analyze table tt1;
+analyze table tt2;
+select /*+ inl_join(tt1)*/ count(*) from tt2
+left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id;
+count(*)
+5
+select /*+ inl_join(tt1)*/ count(*) from tt2
+left join tt1 on tt1.listid=tt2.listid;
+count(*)
+5
+explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2
+left join tt1 on tt1.listid=tt2.listid;
+id estRows task access object operator info
+StreamAgg 1.00 root funcs:count(Column#13)->Column#7
+└─IndexReader 1.00 root index:StreamAgg
+ └─StreamAgg 1.00 cop[tikv] funcs:count(1)->Column#13
+ └─IndexFullScan 5.00 cop[tikv] table:tt2, index:idx_listid(listid) keep order:false
+set global tidb_partition_prune_mode=default;
+set session tidb_partition_prune_mode=default;
diff --git a/tests/integrationtest/r/executor/prepared.result b/tests/integrationtest/r/executor/prepared.result
index ded5242354bf4..fc2ccce32e8a1 100644
--- a/tests/integrationtest/r/executor/prepared.result
+++ b/tests/integrationtest/r/executor/prepared.result
@@ -179,3 +179,139 @@ data
 1.100 11.110
 set @@tidb_enable_prepared_plan_cache=default;
+drop table if exists t;
+create table t (id int, KEY id (id));
+prepare stmt from 'select * from t limit ? offset ?';
+prepare stmt from 'select b from t';
+Error 1054 (42S22): Unknown column 'b' in 'field list'
+prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?';
+Error 1054 (42S22): Unknown column 'a' in 'order clause'
+drop table if exists t;
+prepare stmt from 'create table t (id int, KEY id (id))';
+prepare stmt0 from "create table t0(a int primary key)";
+prepare stmt1 from "execute stmt0";
+Error 1295 (HY000): This command is not supported in the prepared statement protocol yet
+prepare stmt2 from "deallocate prepare stmt0";
+Error 1295 (HY000): This command is not supported in the prepared statement protocol yet
+prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'";
+Error 1295 (HY000): This command is not supported in the prepared statement protocol yet
+drop table if exists t;
+create table t (id int primary key, num int);
+insert into t values (1, 1);
+insert into t values (2, 2);
+insert into t values (3, 3);
+prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?';
+set @ignore_plan_doma = 1;
+execute stmt using @ignore_plan_doma;
+id num
+1 1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+prepare stmt from "select /*+ max_execution_time(10) */ sleep(3)";
+set @a=now();
+execute stmt;
+sleep(3)
+1
+select timediff(now(), @a) < 3;
+timediff(now(), @a) < 3
+1
+set @a=now();
+select /*+ max_execution_time(10) */ sleep(3);
+sleep(3)
+1
+select timediff(now(), @a) < 3;
+timediff(now(), @a) < 3
+1
+drop table if exists t;
+create table t (i int);
+prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;';
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+prepare stmt from 'SELECT IF(?, 1, 0);';
+set @a=1, @b=null, @c=0;
+execute stmt using @a;
+IF(?, 1, 0)
+1
+execute stmt using @b;
+IF(?, 1, 0)
+0
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+execute stmt using @c;
+IF(?, 1, 0)
+0
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+drop table if exists IDT_26207;
+CREATE TABLE IDT_26207 (col1 bit(1));
+insert into IDT_26207 values(0x0), (0x1);
+prepare stmt from 'select hex(t1.col1) from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)';
+set @a=0x01, @b=0x01, @c=0x01;
+execute stmt using @a,@b,@c;
+hex(t1.col1)
+1
+set @a=0x00, @b=0x00, @c=0x01;
+execute stmt using @a,@b,@c;
+hex(t1.col1)
+0
+1
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+drop table if exists IDT_MC21780;
+CREATE TABLE IDT_MC21780 (
+COL1 timestamp NULL DEFAULT NULL,
+COL2 timestamp NULL DEFAULT NULL,
+COL3 timestamp NULL DEFAULT NULL,
+KEY U_M_COL (COL1,COL2)
+);
+insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28");
+prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1. col1 < ? and t2. col1 in (?, ?, ?);';
+set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07";
+execute stmt using @a,@b,@c,@d;
+COL1 COL2 COL3 COL1 COL2 COL3
+set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28";
+execute stmt using @a,@b,@c,@d;
+COL1 COL2 COL3 COL1 COL2 COL3
+1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28 1970-12-18 10:53:28
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+1
+set tidb_enable_prepared_plan_cache=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_enable_collect_execution_info=0;
+drop table if exists tmp2;
+create temporary table tmp2 (a int, b int, key(a), key(b));
+prepare stmt from 'select * from tmp2;';
+execute stmt;
+a b
+execute stmt;
+a b
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+drop table if exists tmp_t;
+create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows;
+prepare stmt from 'select * from tmp_t;';
+execute stmt;
+id a b
+execute stmt;
+id a b
+select @@last_plan_from_cache;
+@@last_plan_from_cache
+0
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_enable_collect_execution_info=default;
+set tidb_enable_prepared_plan_cache=1;
+set @@tidb_txn_mode = 'pessimistic';
+prepare stmt1 from 'do 1';
+set @@tidb_txn_mode = 'optimistic';
+prepare stmt1 from 'do 1';
+set tidb_enable_prepared_plan_cache=default;
+set @@tidb_txn_mode=default;
diff --git a/tests/integrationtest/r/executor/revoke.result b/tests/integrationtest/r/executor/revoke.result
new file mode 100644
index 0000000000000..2ba92d9cf193c
--- /dev/null
+++ b/tests/integrationtest/r/executor/revoke.result
@@ -0,0 +1,85 @@
+drop user if exists test;
+drop table if exists test1;
+CREATE USER test;
+CREATE TABLE executor__revoke.test1(c1 int);
+GRANT SELECT ON executor__revoke.test1 TO test;
+REVOKE SELECT ON executor__revoke.test1 from test;
+SELECT Column_priv FROM mysql.tables_priv WHERE User="test" ;
+Column_priv
+drop user if exists test;
+CREATE USER test;
+GRANT SELECT(Host) ON mysql.db TO test;
+GRANT SELECT(DB) ON mysql.db TO test;
+REVOKE SELECT(Host) ON mysql.db FROM test;
+SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' ;
+count(Column_priv)
+0
+SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' ;
+count(Column_priv)
+1
+DROP USER if exists dyn;
+create user dyn;
+GRANT BACKUP_Admin ON *.* TO dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN N
+REVOKE BACKUP_Admin,system_variables_admin ON executor__revoke.* FROM dyn;
+Error 3619 (HY000): Illegal privilege level specified for BACKUP_ADMIN,SYSTEM_VARIABLES_ADMIN
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN N
+REVOKE BACKUP_Admin ON *.* FROM dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+REVOKE bogus ON *.* FROM dyn;
+SHOW WARNINGS;
+Level Code Message
+Warning 3929 Dynamic privilege 'BOGUS' is not registered with the server.
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % BACKUP_ADMIN N
+dyn % SYSTEM_VARIABLES_ADMIN N
+REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn;
+REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION;
+REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn;
+SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option;
+USER HOST PRIV WITH_GRANT_OPTION
+dyn % SYSTEM_VARIABLES_ADMIN Y
+drop DATABASE if exists d1;
+drop user if exists issue28533;
+CREATE DATABASE d1;
+USE d1;
+CREATE TABLE t1 (a int);
+CREATE USER issue28533;
+GRANT ALTER ON d1.t1 TO issue28533;
+GRANT INSERT, CREATE ON d1.t2 TO issue28533;
+DROP TABLE t1;
+REVOKE ALTER ON d1.t1 FROM issue28533;
+DROP USER issue28533;
+DROP TABLE IF EXISTS t1;
+DROP DATABASE IF EXISTS d1;
+use executor__revoke;
+drop user if exists 't1234'@'%';
+create table if not exists xx (id int);
+CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU';
+GRANT USAGE ON * TO 't1234'@'%';
+GRANT USAGE ON executor__revoke.* TO 't1234'@'%';
+GRANT USAGE ON executor__revoke.xx TO 't1234'@'%';
+REVOKE USAGE ON * FROM 't1234'@'%';
+REVOKE USAGE ON executor__revoke.* FROM 't1234'@'%';
+REVOKE USAGE ON executor__revoke.xx FROM 't1234'@'%';
+drop table if exists TABLE_PRIV;
+CREATE TABLE executor__revoke.TABLE_PRIV(id int, name varchar(20));
+GRANT SELECT ON executor__revoke.table_priv TO 'root'@'%';
+revoke SELECT ON executor__revoke.TABLE_PRIV from 'root'@'%';
+GRANT SELECT ON executor__revoke.* TO 'root'@'%';
+revoke SELECT ON executor__revoke.* from 'root'@'%';
+GRANT SELECT (id), INSERT (ID, name) ON executor__revoke.TABLE_PRIV TO 'root'@'%';
+REVOKE SELECT (ID) ON executor__revoke.taBle_priv from 'root'@'%';
diff --git a/tests/integrationtest/r/executor/sample.result b/tests/integrationtest/r/executor/sample.result
new file mode 100644
index 0000000000000..9c9ec8e3f1985
--- /dev/null
+++ b/tests/integrationtest/r/executor/sample.result
@@ -0,0 +1,189 @@
+set @@global.tidb_scatter_region=1
+drop table if exists t;
+set tidb_enable_clustered_index = on;
+create table t (a varchar(255) primary key, b bigint);
+insert into t values ('b', 100), ('y', 100);
+split table t between ('a') and ('z') regions 2;
+select a from t tablesample regions();
+a
+b
+y
+drop table t;
+create table t (a varchar(255), b int, c decimal, primary key (a, b, c));
+split table t between ('a', 0, 0) and ('z', 100, 100) regions 2;
+insert into t values ('b', 10, 100), ('y', 100, 10);
+select * from t tablesample regions();
+a b c
+b 10 100
+y 100 10
+drop table t;
+create table t (a bigint primary key, b int default 10);
+split table t between (1) and (100000) regions 4;
+insert into t(a) values (200), (25600), (50300), (99900), (99901);
+select a from t tablesample regions();
+a
+200
+25600
+50300
+99900
+drop table t;
+create table t (a bigint, b int default 10);
+split table t
between (0) and (100000) regions 4; +insert into t(a) values (1), (2), (3); +select a from t tablesample regions(); +a +1 +set tidb_enable_clustered_index=default; +drop table if exists t; +create table t (a int, b varchar(255)); +insert into t values (1, 'abc'); +create view v as select * from t; +select * from v tablesample regions(); +Error 8128 (HY000): Invalid TABLESAMPLE: Unsupported TABLESAMPLE in views +select * from information_schema.tables tablesample regions(); +Error 8128 (HY000): Invalid TABLESAMPLE: Unsupported TABLESAMPLE in virtual tables +select a from t tablesample system(); +Error 8128 (HY000): Invalid TABLESAMPLE: Only supports REGIONS sampling method +select a from t tablesample bernoulli(10 percent); +Error 8128 (HY000): Invalid TABLESAMPLE: Only supports REGIONS sampling method +select a from t as t1 tablesample regions(), t as t2 tablesample system(); +Error 8128 (HY000): Invalid TABLESAMPLE: Only supports REGIONS sampling method +select a from t tablesample (); +Error 8128 (HY000): Invalid TABLESAMPLE: Only supports REGIONS sampling method +drop table if exists t; +create table t (a int, b varchar(255)); +insert into t values (1, 'abc'); +select _tidb_rowid from t tablesample regions(); +_tidb_rowid +1 +select a, _tidb_rowid from t tablesample regions(); +a _tidb_rowid +1 1 +select _tidb_rowid, b from t tablesample regions(); +_tidb_rowid b +1 abc +select b, _tidb_rowid, a from t tablesample regions(); +b _tidb_rowid a +abc 1 1 +drop table if exists t; +create table t (a int, b varchar(255), primary key (a)) partition by hash(a) partitions 2; +insert into t values (1, '1'), (2, '2'), (3, '3'); +select count(*) from t tablesample regions(); +count(*) +2 +delete from t; +insert into t values (1, '1'); +select count(*) from t partition (p0) tablesample regions(); +count(*) +0 +select count(*) from t partition (p1) tablesample regions(); +count(*) +1 +drop table if exists t; +create table t (a int, b int, unique key idx(a)) partition by range (a) ( +partition p0 values less than (0), +partition p1 values less than (10), +partition p2 values less than (30), +partition p3 values less than (maxvalue)); +insert into t values (2, 2), (31, 31), (12, 12); +select _tidb_rowid from t tablesample regions() order by _tidb_rowid; +_tidb_rowid +1 +2 +3 +drop table if exists t; +create table t (a int primary key, b int as (a + 1), c int as (b + 1), d int as (c + 1)); +split table t between (0) and (10000) regions 4; +insert into t(a) values (1), (2), (2999), (4999), (9999); +select a from t tablesample regions(); +a +1 +2999 +9999 +select c from t tablesample regions(); +c +3 +3001 +10001 +select a, b from t tablesample regions(); +a b +1 2 +2999 3000 +9999 10000 +select d, c from t tablesample regions(); +d c +4 3 +3002 3001 +10002 10001 +select a, d from t tablesample regions(); +a d +1 4 +2999 3002 +9999 10002 +drop table if exists t; +create table t (a int primary key); +split table t between (0) and (40000) regions 4; +insert into t values (1), (1000), (10002); +select * from t tablesample regions(); +a +1 +10002 +begin; +insert into t values (20006), (50000); +select * from t tablesample regions(); +a +1 +10002 +delete from t where a = 1; +select * from t tablesample regions(); +a +1 +10002 +commit; +select * from t tablesample regions(); +a +1000 +10002 +20006 +50000 +drop table if exists t; +create table t (a int primary key); +split table t between (0) and (40000) regions 4; +insert into t values (1), (1000), (10002); +begin; +select * from t tablesample regions(); +a +1 
+10002 +insert into t values (20006), (50000); +select * from t tablesample regions(); +a +1 +10002 +commit; +select * from t tablesample regions(); +a +1 +10002 +20006 +50000 +drop table if exists t; +create table t (a int primary key, b int, c varchar(255)); +split table t between (0) and (10000) regions 5; +insert into t values (1000, 1, '1'), (1001, 1, '1'), (2100, 2, '2'), (4500, 3, '3'); +create index idx_0 on t (b); +select a from t tablesample regions() order by a; +a +1000 +2100 +4500 +select a from t use index (idx_0) tablesample regions() order by a; +a +1000 +1001 +2100 +4500 +show warnings; +Level Code Message +Warning 8128 Invalid TABLESAMPLE: plan not supported +set @@global.tidb_scatter_region=default; diff --git a/tests/integrationtest/r/executor/set.result b/tests/integrationtest/r/executor/set.result new file mode 100644 index 0000000000000..1482cac01daa4 --- /dev/null +++ b/tests/integrationtest/r/executor/set.result @@ -0,0 +1,21 @@ +set @@global.tidb_max_delta_schema_count= -1; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '-1' +select @@global.tidb_max_delta_schema_count; +@@global.tidb_max_delta_schema_count +100 +set @@global.tidb_max_delta_schema_count= 9223372036854775807; +show warnings; +Level Code Message +Warning 1292 Truncated incorrect tidb_max_delta_schema_count value: '9223372036854775807' +select @@global.tidb_max_delta_schema_count; +@@global.tidb_max_delta_schema_count +16384 +set @@global.tidb_max_delta_schema_count= invalid_val; +Error 1232 (42000): Incorrect argument type to variable 'tidb_max_delta_schema_count' +set @@global.tidb_max_delta_schema_count= 2048; +select @@global.tidb_max_delta_schema_count; +@@global.tidb_max_delta_schema_count +2048 +set @@global.tidb_max_delta_schema_count= default; diff --git a/tests/integrationtest/r/executor/show.result b/tests/integrationtest/r/executor/show.result index 3e7dfc9181929..679caffa6fa8c 100644 --- a/tests/integrationtest/r/executor/show.result +++ b/tests/integrationtest/r/executor/show.result @@ -145,3 +145,980 @@ Table Create Table t CREATE TABLE `t` ( `created_at` datetime DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T![ttl] TTL=`created_at` + INTERVAL 100 YEAR */ /*T![ttl] TTL_ENABLE='ON' */ /*T![ttl] TTL_JOB_INTERVAL='1d' */ +show histograms_in_flight; +HistogramsInFlight +0 +show open tables; +Database Table In_use Name_locked +show open tables in executor__show; +Database Table In_use Name_locked +create or replace view v1 as select 1; +show create view v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`%` SQL SECURITY DEFINER VIEW `v1` (`1`) AS SELECT 1 AS `1` utf8mb4 utf8mb4_general_ci +drop view v1; +drop database if exists test1; +drop database if exists test2; +drop table if exists t, t1; +create table t1(a int,b int); +drop view if exists v1; +create or replace definer=`root`@`127.0.0.1` view v1 as select * from t1; +show create table v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a`, `b`) AS SELECT `executor__show`.`t1`.`a` AS `a`,`executor__show`.`t1`.`b` AS `b` FROM `executor__show`.`t1` utf8mb4 utf8mb4_general_ci +show create view v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a`, `b`) AS SELECT `executor__show`.`t1`.`a` AS 
`a`,`executor__show`.`t1`.`b` AS `b` FROM `executor__show`.`t1` utf8mb4 utf8mb4_general_ci +drop view v1; +drop table t1; +drop view if exists v; +create or replace definer=`root`@`127.0.0.1` view v as select JSON_MERGE('{}', '{}') as col; +show create view v; +View Create View character_set_client collation_connection +v CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v` (`col`) AS SELECT JSON_MERGE(_UTF8MB4'{}', _UTF8MB4'{}') AS `col` utf8mb4 utf8mb4_general_ci +drop view if exists v; +drop table if exists t1; +create table t1(a int,b int); +create or replace definer=`root`@`127.0.0.1` view v1 as select avg(a),t1.* from t1 group by a; +show create view v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`avg(a)`, `a`, `b`) AS SELECT AVG(`a`) AS `avg(a)`,`executor__show`.`t1`.`a` AS `a`,`executor__show`.`t1`.`b` AS `b` FROM `executor__show`.`t1` GROUP BY `a` utf8mb4 utf8mb4_general_ci +drop view v1; +create or replace definer=`root`@`127.0.0.1` view v1 as select a+b, t1.* , a as c from t1; +show create view v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`127.0.0.1` SQL SECURITY DEFINER VIEW `v1` (`a+b`, `a`, `b`, `c`) AS SELECT `a`+`b` AS `a+b`,`executor__show`.`t1`.`a` AS `a`,`executor__show`.`t1`.`b` AS `b`,`a` AS `c` FROM `executor__show`.`t1` utf8mb4 utf8mb4_general_ci +drop table t1; +drop view v1; +create table t(c int, b int as (c + 1))ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` int(11) DEFAULT NULL, + `b` int(11) GENERATED ALWAYS AS (`c` + 1) VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +create table t(c int, b int as (c + 1) not null)ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `c` int(11) DEFAULT NULL, + `b` int(11) GENERATED ALWAYS AS (`c` + 1) VIRTUAL NOT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +create table t ( a char(10) charset utf8 collate utf8_bin, b char(10) as (rtrim(a))); +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `a` char(10) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `b` char(10) GENERATED ALWAYS AS (rtrim(`a`)) VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +drop table if exists different_charset; +create table different_charset(ch1 varchar(10) charset utf8, ch2 varchar(10) charset binary); +show create table different_charset; +Table Create Table +different_charset CREATE TABLE `different_charset` ( + `ch1` varchar(10) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `ch2` varbinary(10) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table `t` ( +`a` timestamp not null default current_timestamp, +`b` timestamp(3) default current_timestamp(3), +`c` datetime default current_timestamp, +`d` datetime(4) default current_timestamp(4), +`e` varchar(20) default 'cUrrent_tImestamp', +`f` datetime(2) default current_timestamp(2) on update current_timestamp(2), +`g` timestamp(2) default current_timestamp(2) on update current_timestamp(2), +`h` date default current_date ); +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `a` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + `b` timestamp(3) 
DEFAULT CURRENT_TIMESTAMP(3), + `c` datetime DEFAULT CURRENT_TIMESTAMP, + `d` datetime(4) DEFAULT CURRENT_TIMESTAMP(4), + `e` varchar(20) DEFAULT 'cUrrent_tImestamp', + `f` datetime(2) DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE CURRENT_TIMESTAMP(2), + `g` timestamp(2) DEFAULT CURRENT_TIMESTAMP(2) ON UPDATE CURRENT_TIMESTAMP(2), + `h` date DEFAULT CURRENT_DATE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t; +create table t (a int, b int) shard_row_id_bits = 4 pre_split_regions=3; +show create table `t`; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) DEFAULT NULL, + `b` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /*T! SHARD_ROW_ID_BITS=4 PRE_SPLIT_REGIONS=3 */ +drop table t; +drop table if exists t1; +create table t1(c int unsigned default 0); +show create table `t1`; +Table Create Table +t1 CREATE TABLE `t1` ( + `c` int(10) unsigned DEFAULT '0' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table t1; +CREATE TABLE `log` (`LOG_ID` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT,`ROUND_ID` bigint(20) UNSIGNED NOT NULL,`USER_ID` int(10) UNSIGNED NOT NULL,`USER_IP` int(10) UNSIGNED DEFAULT NULL,`END_TIME` datetime NOT NULL,`USER_TYPE` int(11) DEFAULT NULL,`APP_ID` int(11) DEFAULT NULL,PRIMARY KEY (`LOG_ID`,`END_TIME`) NONCLUSTERED,KEY `IDX_EndTime` (`END_TIME`),KEY `IDX_RoundId` (`ROUND_ID`),KEY `IDX_UserId_EndTime` (`USER_ID`,`END_TIME`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=505488 PARTITION BY RANGE ( month(`end_time`) ) (PARTITION `p1` VALUES LESS THAN (2),PARTITION `p2` VALUES LESS THAN (3),PARTITION `p3` VALUES LESS THAN (4),PARTITION `p4` VALUES LESS THAN (5),PARTITION `p5` VALUES LESS THAN (6),PARTITION `p6` VALUES LESS THAN (7),PARTITION `p7` VALUES LESS THAN (8),PARTITION `p8` VALUES LESS THAN (9),PARTITION `p9` VALUES LESS THAN (10),PARTITION `p10` VALUES LESS THAN (11),PARTITION `p11` VALUES LESS THAN (12),PARTITION `p12` VALUES LESS THAN (MAXVALUE)); +show create table log; +Table Create Table +log CREATE TABLE `log` ( + `LOG_ID` bigint(20) unsigned NOT NULL AUTO_INCREMENT, + `ROUND_ID` bigint(20) unsigned NOT NULL, + `USER_ID` int(10) unsigned NOT NULL, + `USER_IP` int(10) unsigned DEFAULT NULL, + `END_TIME` datetime NOT NULL, + `USER_TYPE` int(11) DEFAULT NULL, + `APP_ID` int(11) DEFAULT NULL, + PRIMARY KEY (`LOG_ID`,`END_TIME`) /*T![clustered_index] NONCLUSTERED */, + KEY `IDX_EndTime` (`END_TIME`), + KEY `IDX_RoundId` (`ROUND_ID`), + KEY `IDX_UserId_EndTime` (`USER_ID`,`END_TIME`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=505488 +PARTITION BY RANGE (MONTH(`end_time`)) +(PARTITION `p1` VALUES LESS THAN (2), + PARTITION `p2` VALUES LESS THAN (3), + PARTITION `p3` VALUES LESS THAN (4), + PARTITION `p4` VALUES LESS THAN (5), + PARTITION `p5` VALUES LESS THAN (6), + PARTITION `p6` VALUES LESS THAN (7), + PARTITION `p7` VALUES LESS THAN (8), + PARTITION `p8` VALUES LESS THAN (9), + PARTITION `p9` VALUES LESS THAN (10), + PARTITION `p10` VALUES LESS THAN (11), + PARTITION `p11` VALUES LESS THAN (12), + PARTITION `p12` VALUES LESS THAN (MAXVALUE)) +create table ttt4(a varchar(123) default null collate utf8mb4_unicode_ci)engine=innodb default charset=utf8mb4 collate=utf8mb4_unicode_ci; +show create table `ttt4`; +Table Create Table +ttt4 CREATE TABLE `ttt4` ( + `a` varchar(123) COLLATE utf8mb4_unicode_ci DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci +create table ttt5(a varchar(123) default null)engine=innodb 
default charset=utf8mb4 collate=utf8mb4_bin; +show create table `ttt5`; +Table Create Table +ttt5 CREATE TABLE `ttt5` ( + `a` varchar(123) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t(a int, b real); +alter table t add index expr_idx((a*b+1)); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) DEFAULT NULL, + `b` double DEFAULT NULL, + KEY `expr_idx` ((`a` * `b` + 1)) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop sequence if exists seq; +create sequence seq; +show create table seq; +Sequence Create Sequence +seq CREATE SEQUENCE `seq` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default collate=binary; +show create table binary_collate; +Table Create Table +binary_collate CREATE TABLE `binary_collate` ( + `a` varbinary(10) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=binary +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default charset=binary collate=binary; +show create table binary_collate; +Table Create Table +binary_collate CREATE TABLE `binary_collate` ( + `a` varbinary(10) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=binary +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default charset=utf8mb4 collate=utf8mb4_bin; +show create table binary_collate; +Table Create Table +binary_collate CREATE TABLE `binary_collate` ( + `a` varchar(10) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists default_num; +create table default_num(a int default 11); +show create table default_num; +Table Create Table +default_num CREATE TABLE `default_num` ( + `a` int(11) DEFAULT '11' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists default_varchar; +create table default_varchar(a varchar(10) default "haha"); +show create table default_varchar; +Table Create Table +default_varchar CREATE TABLE `default_varchar` ( + `a` varchar(10) DEFAULT 'haha' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists default_sequence; +create table default_sequence(a int default nextval(seq)); +show create table default_sequence; +Table Create Table +default_sequence CREATE TABLE `default_sequence` ( + `a` int(11) DEFAULT nextval(`executor__show`.`seq`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +set @@foreign_key_checks=0; +DROP TABLE IF EXISTS parent, child; +CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id)); +CREATE TABLE parent ( id INT NOT NULL PRIMARY KEY auto_increment ); +show create table child; +Table Create Table +child CREATE TABLE `child` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `parent_id` int(11) NOT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + KEY `par_ind` (`parent_id`), + CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `executor__show`.`parent` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +DROP TABLE child; +CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE RESTRICT ON UPDATE CASCADE); +show create table child; +Table Create Table 
+child CREATE TABLE `child` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `parent_id` int(11) NOT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + KEY `par_ind` (`parent_id`), + CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `executor__show`.`parent` (`id`) ON DELETE RESTRICT ON UPDATE CASCADE +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +create database test1; +create database test2; +create table test1.t1 (id int key, b int, index(b)); +create table test2.t2 (id int key, b int, foreign key fk(b) references test1.t1(id)); +show create table test2.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `id` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + KEY `fk` (`b`), + CONSTRAINT `fk` FOREIGN KEY (`b`) REFERENCES `test1`.`t1` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t(a int, b char(10) as ('a')); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) DEFAULT NULL, + `b` char(10) GENERATED ALWAYS AS (_utf8mb4'a') VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t(a int, b char(10) as (_utf8'a')); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) DEFAULT NULL, + `b` char(10) GENERATED ALWAYS AS (_utf8'a') VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +set @@session.tidb_enable_list_partition = ON; +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id)) partition by list (id) ( +partition p0 values in (3,5,6,9,17), +partition p1 values in (1,2,10,11,19,20), +partition p2 values in (4,12,13,14,18), +partition p3 values in (7,8,15,16,null) +); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `id` int(11) DEFAULT NULL, + `name` varchar(10) DEFAULT NULL, + UNIQUE KEY `idx` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST (`id`) +(PARTITION `p0` VALUES IN (3,5,6,9,17), + PARTITION `p1` VALUES IN (1,2,10,11,19,20), + PARTITION `p2` VALUES IN (4,12,13,14,18), + PARTITION `p3` VALUES IN (7,8,15,16,NULL)) +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id)) partition by list columns (id) ( +partition p0 values in (3,5,6,9,17), +partition p1 values in (1,2,10,11,19,20), +partition p2 values in (4,12,13,14,18), +partition p3 values in (7,8,15,16,null) +); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `id` int(11) DEFAULT NULL, + `name` varchar(10) DEFAULT NULL, + UNIQUE KEY `idx` (`id`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(`id`) +(PARTITION `p0` VALUES IN (3,5,6,9,17), + PARTITION `p1` VALUES IN (1,2,10,11,19,20), + PARTITION `p2` VALUES IN (4,12,13,14,18), + PARTITION `p3` VALUES IN (7,8,15,16,NULL)) +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id, name)) partition by list columns (id, name) ( +partition p0 values in ((3, '1'), (5, '5')), +partition p1 values in ((1, '1'))); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `id` int(11) DEFAULT NULL, + `name` varchar(10) DEFAULT NULL, + UNIQUE KEY `idx` (`id`,`name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY LIST COLUMNS(`id`,`name`) +(PARTITION `p0` VALUES IN ((3,'1'),(5,'5')), + PARTITION `p1` VALUES IN ((1,'1'))) +DROP TABLE IF EXISTS t; +create table t (id int primary 
key, v varchar(255) not null, key idx_v (v) comment 'foo\'bar'); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `id` int(11) NOT NULL, + `v` varchar(255) NOT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + KEY `idx_v` (`v`) COMMENT 'foo''bar' +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +CREATE TABLE `thash` ( +`id` bigint unsigned NOT NULL, +`data` varchar(255) DEFAULT NULL, +PRIMARY KEY (`id`) +) +PARTITION BY HASH (`id`) +(PARTITION pEven COMMENT = "Even ids", +PARTITION pOdd COMMENT = "Odd ids"); +show create table `thash`; +Table Create Table +thash CREATE TABLE `thash` ( + `id` bigint(20) unsigned NOT NULL, + `data` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH (`id`) +(PARTITION `pEven` COMMENT 'Even ids', + PARTITION `pOdd` COMMENT 'Odd ids') +drop table if exists `thash`; +CREATE TABLE `thash` ( +`id` bigint unsigned NOT NULL, +`data` varchar(255) DEFAULT NULL, +PRIMARY KEY (`id`) +) +PARTITION BY HASH (`id`); +show create table `thash`; +Table Create Table +thash CREATE TABLE `thash` ( + `id` bigint(20) unsigned NOT NULL, + `data` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH (`id`) PARTITIONS 1 +drop table if exists t; +create table t(a int primary key, b varchar(20) default '\\'); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` int(11) NOT NULL, + `b` varchar(20) DEFAULT '\\', + PRIMARY KEY (`a`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t(a set('a', 'b') charset binary,b enum('a', 'b') charset ascii); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` set('a','b') CHARACTER SET binary COLLATE binary DEFAULT NULL, + `b` enum('a','b') CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t(a bit default (rand())); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` bit(1) DEFAULT rand() +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop table if exists t; +create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0xff)); +Error 1654 (HY000): Partition column values of incorrect type +create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0x7f)); +show create table t; +Table Create Table +t CREATE TABLE `t` ( + `a` varchar(255) CHARACTER SET ascii COLLATE ascii_bin DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY RANGE COLUMNS(`a`) +(PARTITION `p` VALUES LESS THAN (x'7f')) +set @@session.tidb_enable_list_partition = default; +set @@foreign_key_checks=default; +create table if not exists show_errors (a int); +create table show_errors (a int); +Error 1050 (42S01): Table 'executor__show.show_errors' already exists +show errors; +Level Code Message +Error 1050 Table 'executor__show.show_errors' already exists +select 1; +1 +1 +create invalid; +[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 14 near "invalid;" +show errors; +Level Code Message +show tables; +Error 1046 (3D000): No database 
selected +show tables; +Error 1046 (3D000): No database selected +admin show slow recent 3; +admin show slow top 3; +admin show slow top internal 3; +admin show slow top all 3; +drop table if exists v1; +drop view if exists v1; +drop sequence if exists seq1; +drop table if exists seq1; +create view v1 as select 1; +create temporary table v1 (a int); +show create table v1; +Table Create Table +v1 CREATE TEMPORARY TABLE `v1` ( + `a` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +drop view v1; +show create view v1; +Error 1146 (42S02): Table 'executor__show.v1' doesn't exist +drop view if exists seq1; +create sequence seq1; +create temporary table seq1 (a int); +show create sequence seq1; +Table Create Table +seq1 CREATE SEQUENCE `seq1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB +drop sequence seq1; +show create sequence seq1; +Error 1146 (42S02): Table 'executor__show.seq1' doesn't exist +show builtins; +Supported_builtin_functions +abs +acos +adddate +addtime +aes_decrypt +aes_encrypt +and +any_value +ascii +asin +atan +atan2 +benchmark +bin +bin_to_uuid +bit_count +bit_length +bitand +bitneg +bitor +bitxor +case +ceil +ceiling +char_func +char_length +character_length +charset +coalesce +coercibility +collation +compress +concat +concat_ws +connection_id +conv +convert +convert_tz +cos +cot +crc32 +curdate +current_date +current_resource_group +current_role +current_time +current_timestamp +current_user +curtime +database +date +date_add +date_format +date_sub +datediff +day +dayname +dayofmonth +dayofweek +dayofyear +decode +default_func +degrees +des_decrypt +des_encrypt +div +elt +encode +encrypt +eq +exp +export_set +extract +field +find_in_set +floor +format +format_bytes +format_nano_time +found_rows +from_base64 +from_days +from_unixtime +ge +get_format +get_lock +getparam +greatest +grouping +gt +hex +hour +if +ifnull +ilike +in +inet6_aton +inet6_ntoa +inet_aton +inet_ntoa +insert_func +instr +intdiv +interval +is_free_lock +is_ipv4 +is_ipv4_compat +is_ipv4_mapped +is_ipv6 +is_used_lock +is_uuid +isfalse +isnull +istrue +json_array +json_array_append +json_array_insert +json_contains +json_contains_path +json_depth +json_extract +json_insert +json_keys +json_length +json_memberof +json_merge +json_merge_patch +json_merge_preserve +json_object +json_overlaps +json_pretty +json_quote +json_remove +json_replace +json_search +json_set +json_storage_free +json_storage_size +json_type +json_unquote +json_valid +last_day +last_insert_id +lastval +lcase +le +least +left +leftshift +length +like +ln +load_file +localtime +localtimestamp +locate +log +log10 +log2 +lower +lpad +lt +ltrim +make_set +makedate +maketime +master_pos_wait +md5 +microsecond +mid +minus +minute +mod +month +monthname +mul +name_const +ne +nextval +not +now +nulleq +oct +octet_length +old_password +or +ord +password_func +period_add +period_diff +pi +plus +position +pow +power +quarter +quote +radians +rand +random_bytes +regexp +regexp_instr +regexp_like +regexp_replace +regexp_substr +release_all_locks +release_lock +repeat +replace +reverse +right +rightshift +round +row_count +rpad +rtrim +schema +sec_to_time +second +session_user +setval +setvar +sha +sha1 +sha2 +sign +sin +sleep +sm3 +space +sqrt +str_to_date +strcmp +subdate +substr +substring +substring_index +subtime +sysdate +system_user +tan +tidb_bounded_staleness +tidb_current_tso +tidb_decode_binary_plan +tidb_decode_key +tidb_decode_plan 
+tidb_decode_sql_digests +tidb_encode_sql_digest +tidb_is_ddl_owner +tidb_parse_tso +tidb_parse_tso_logical +tidb_row_checksum +tidb_shard +tidb_version +time +time_format +time_to_sec +timediff +timestamp +timestampadd +timestampdiff +to_base64 +to_days +to_seconds +translate +trim +truncate +ucase +unaryminus +uncompress +uncompressed_length +unhex +unix_timestamp +upper +user +utc_date +utc_time +utc_timestamp +uuid +uuid_short +uuid_to_bin +validate_password_strength +version +vitess_hash +week +weekday +weekofyear +weight_string +xor +year +yearweek +SHOW INDEX FROM performance_schema.events_statements_summary_by_digest; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment Visible Expression Clustered +events_statements_summary_by_digest 0 SCHEMA_NAME 1 SCHEMA_NAME A 0 NULL NULL YES BTREE YES NULL NO +events_statements_summary_by_digest 0 SCHEMA_NAME 2 DIGEST A 0 NULL NULL YES BTREE YES NULL NO +drop table if exists t1, t3, t4, t5, t6, t7; +create global temporary table t1 (id int) on commit delete rows; +create global temporary table t3 (i int primary key, j int) on commit delete rows; +show create table t1; +Table Create Table +t1 CREATE GLOBAL TEMPORARY TABLE `t1` ( + `id` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS +show create table t3; +Table Create Table +t3 CREATE GLOBAL TEMPORARY TABLE `t3` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS +CREATE GLOBAL TEMPORARY TABLE `t4` ( +`i` int(11) NOT NULL, +`j` int(11) DEFAULT NULL, +PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS; +CREATE GLOBAL TEMPORARY TABLE t5 ( +id int(11) NOT NULL AUTO_INCREMENT, +b int(11) NOT NULL, +pad varbinary(255) DEFAULT NULL, +PRIMARY KEY (id), +KEY b (b)) ON COMMIT DELETE ROWS; +show create table t5; +Table Create Table +t5 CREATE GLOBAL TEMPORARY TABLE `t5` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `b` int(11) NOT NULL, + `pad` varbinary(255) DEFAULT NULL, + PRIMARY KEY (`id`) /*T![clustered_index] CLUSTERED */, + KEY `b` (`b`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS +create temporary table t6 (i int primary key, j int); +show create table t6; +Table Create Table +t6 CREATE TEMPORARY TABLE `t6` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +create temporary table t7 (i int primary key auto_increment, j int); +begin; +insert into t7 (j) values (14); +insert into t7 (j) values (24); +select * from t7; +i j +1 14 +2 24 +show create table t7; +Table Create Table +t7 CREATE TEMPORARY TABLE `t7` ( + `i` int(11) NOT NULL AUTO_INCREMENT, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin AUTO_INCREMENT=3 +commit; +drop table if exists t1; +create table t1 (id int); +alter table t1 cache; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin /* CACHED ON */ +select create_options from information_schema.tables where table_schema = 'executor__show' and table_name = 't1'; +create_options 
+cached=on +alter table t1 nocache; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` int(11) DEFAULT NULL +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +select create_options from information_schema.tables where table_schema = 'executor__show' and table_name = 't1'; +create_options + +DROP DATABASE IF EXISTS `TEST_$1`; +DROP DATABASE IF EXISTS `test_$2`; +CREATE DATABASE `TEST_$1`; +CREATE DATABASE `test_$2`; +SHOW DATABASES LIKE 'TEST_$%'; +Database (TEST_$%) +TEST_$1 +test_$2 +SHOW DATABASES LIKE 'test_$%'; +Database (test_$%) +TEST_$1 +test_$2 +SHOW COLLATION LIKE 'UTF8MB4_BI%'; +Collation Charset Id Default Compiled Sortlen +utf8mb4_bin utf8mb4 46 Yes Yes 1 +SHOW COLLATION LIKE 'utf8mb4_bi%'; +Collation Charset Id Default Compiled Sortlen +utf8mb4_bin utf8mb4 46 Yes Yes 1 +drop user if exists 'show'; +drop database if exists AAAA; +drop database if exists BBBB; +create user 'show'@'%'; +show databases; +Database +INFORMATION_SCHEMA +create database AAAA; +create database BBBB; +grant select on AAAA.* to 'show'@'%'; +grant select on BBBB.* to 'show'@'%'; +show databases; +Database +INFORMATION_SCHEMA +AAAA +BBBB +drop user 'show'@'%'; +drop database AAAA; +drop database BBBB; +DROP table IF EXISTS `T1`; +CREATE table `T1` (a int); +SHOW table status LIKE 't1'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +T1 InnoDB 10 Compact 0 0 0 0 0 0 NULL 1 NULL NULL utf8mb4_bin +DROP table IF EXISTS `Li_1`; +DROP table IF EXISTS `li_2`; +CREATE table `Li_1` (a int); +CREATE table `li_2` (a int); +SHOW table status LIKE 'li%'; +Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment +Li_1 InnoDB 10 Compact 0 0 0 0 0 0 NULL 1 NULL NULL utf8mb4_bin +li_2 InnoDB 10 Compact 0 0 0 0 0 0 NULL 1 NULL NULL utf8mb4_bin +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = ''; +show variables like 'authentication_ldap_sasl_bind_root_pwd'; +Variable_name Value +authentication_ldap_sasl_bind_root_pwd +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'; +current_value + +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = password; +show variables like 'authentication_ldap_sasl_bind_root_pwd'; +Variable_name Value +authentication_ldap_sasl_bind_root_pwd ****** +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'; +current_value +****** +SET GLOBAL authentication_ldap_simple_bind_root_pwd = ''; +show variables like 'authentication_ldap_simple_bind_root_pwd'; +Variable_name Value +authentication_ldap_simple_bind_root_pwd +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_simple_bind_root_pwd'; +current_value + +SET GLOBAL authentication_ldap_simple_bind_root_pwd = password; +show variables like 'authentication_ldap_simple_bind_root_pwd'; +Variable_name Value +authentication_ldap_simple_bind_root_pwd ****** +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_simple_bind_root_pwd'; +current_value +****** +SET GLOBAL authentication_ldap_simple_bind_root_pwd = default; +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = default; +show 
collation; +Collation Charset Id Default Compiled Sortlen +ascii_bin ascii 65 Yes Yes 1 +binary binary 63 Yes Yes 1 +gbk_bin gbk 87 Yes 1 +gbk_chinese_ci gbk 28 Yes Yes 1 +latin1_bin latin1 47 Yes Yes 1 +utf8_bin utf8 83 Yes Yes 1 +utf8_general_ci utf8 33 Yes 1 +utf8_unicode_ci utf8 192 Yes 1 +utf8mb4_0900_ai_ci utf8mb4 255 Yes 1 +utf8mb4_0900_bin utf8mb4 309 Yes 1 +utf8mb4_bin utf8mb4 46 Yes Yes 1 +utf8mb4_general_ci utf8mb4 45 Yes 1 +utf8mb4_unicode_ci utf8mb4 224 Yes 1 +select * from information_schema.COLLATIONS; +COLLATION_NAME CHARACTER_SET_NAME ID IS_DEFAULT IS_COMPILED SORTLEN +ascii_bin ascii 65 Yes Yes 1 +binary binary 63 Yes Yes 1 +gbk_bin gbk 87 Yes 1 +gbk_chinese_ci gbk 28 Yes Yes 1 +latin1_bin latin1 47 Yes Yes 1 +utf8_bin utf8 83 Yes Yes 1 +utf8_general_ci utf8 33 Yes 1 +utf8_unicode_ci utf8 192 Yes 1 +utf8mb4_0900_ai_ci utf8mb4 255 Yes 1 +utf8mb4_0900_bin utf8mb4 309 Yes 1 +utf8mb4_bin utf8mb4 46 Yes Yes 1 +utf8mb4_general_ci utf8mb4 45 Yes 1 +utf8mb4_unicode_ci utf8mb4 224 Yes 1 +show character set like '%utf8mb4%'; +Charset Description Default collation Maxlen +utf8mb4 UTF-8 Unicode utf8mb4_bin 4 +select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'; +COLLATION_NAME CHARACTER_SET_NAME ID IS_DEFAULT IS_COMPILED SORTLEN +utf8mb4_bin utf8mb4 46 Yes Yes 1 +set @@session.default_collation_for_utf8mb4='utf8mb4_0900_ai_ci'; +show variables like 'default_collation_for_utf8mb4'; +Variable_name Value +default_collation_for_utf8mb4 utf8mb4_0900_ai_ci +show collation; +Collation Charset Id Default Compiled Sortlen +ascii_bin ascii 65 Yes Yes 1 +binary binary 63 Yes Yes 1 +gbk_bin gbk 87 Yes 1 +gbk_chinese_ci gbk 28 Yes Yes 1 +latin1_bin latin1 47 Yes Yes 1 +utf8_bin utf8 83 Yes Yes 1 +utf8_general_ci utf8 33 Yes 1 +utf8_unicode_ci utf8 192 Yes 1 +utf8mb4_0900_ai_ci utf8mb4 255 Yes Yes 1 +utf8mb4_0900_bin utf8mb4 309 Yes 1 +utf8mb4_bin utf8mb4 46 Yes 1 +utf8mb4_general_ci utf8mb4 45 Yes 1 +utf8mb4_unicode_ci utf8mb4 224 Yes 1 +select * from information_schema.COLLATIONS; +COLLATION_NAME CHARACTER_SET_NAME ID IS_DEFAULT IS_COMPILED SORTLEN +ascii_bin ascii 65 Yes Yes 1 +binary binary 63 Yes Yes 1 +gbk_bin gbk 87 Yes 1 +gbk_chinese_ci gbk 28 Yes Yes 1 +latin1_bin latin1 47 Yes Yes 1 +utf8_bin utf8 83 Yes Yes 1 +utf8_general_ci utf8 33 Yes 1 +utf8_unicode_ci utf8 192 Yes 1 +utf8mb4_0900_ai_ci utf8mb4 255 Yes 1 +utf8mb4_0900_bin utf8mb4 309 Yes 1 +utf8mb4_bin utf8mb4 46 Yes Yes 1 +utf8mb4_general_ci utf8mb4 45 Yes 1 +utf8mb4_unicode_ci utf8mb4 224 Yes 1 +show character set like '%utf8mb4%'; +Charset Description Default collation Maxlen +utf8mb4 UTF-8 Unicode utf8mb4_0900_ai_ci 4 +select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'; +COLLATION_NAME CHARACTER_SET_NAME ID IS_DEFAULT IS_COMPILED SORTLEN +utf8mb4_bin utf8mb4 46 Yes Yes 1 +set @@session.default_collation_for_utf8mb4=default; diff --git a/tests/integrationtest/r/executor/simple.result b/tests/integrationtest/r/executor/simple.result new file mode 100644 index 0000000000000..eee717ed2d97a --- /dev/null +++ b/tests/integrationtest/r/executor/simple.result @@ -0,0 +1,418 @@ +FLUSH TABLES; +FLUSH TABLES WITH READ LOCK; +Error 1105 (HY000): FLUSH TABLES WITH READ LOCK is not supported. 
Please use @@tidb_snapshot +USE test; +USE ``; +Error 1046 (3D000): No database selected +use executor__simple; +drop user if exists 'user_admin'@'localhost'; +drop user if exists test_create_user; +create user 'user_admin'@'localhost'; +grant create user on *.* to 'user_admin'@'localhost'; +create user test_create_user; +drop user test_create_user; +revoke create user on *.* from 'user_admin'@'localhost'; +grant insert, delete on mysql.user to 'user_admin'@'localhost'; +create user test_create_user; +drop user test_create_user; +create role test_create_user; +drop role test_create_user; +drop user 'user_admin'@'localhost'; +drop role if exists r1, r2, r3; +create role r2; +create role r1, r2, r3; +Error 1396 (HY000): Operation CREATE ROLE failed for 'r2'@'%' +SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3'); +user +r2 +drop role r1, r2, r3; +Error 1396 (HY000): Operation DROP ROLE failed for r1@% +SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3'); +user +r2 +drop role r2; +DROP USER IF EXISTS issue23649; +CREATE USER issue23649; +GRANT bogusrole to issue23649; +Error 3523 (HY000): Unknown authorization ID `bogusrole`@`%` +GRANT bogusrole to nonexisting; +Error 3523 (HY000): Unknown authorization ID `bogusrole`@`%` +drop user if exists issue28534; +CREATE USER issue28534; +SET PASSWORD FOR CURRENT_USER() = "43582eussi"; +SELECT authentication_string FROM mysql.User WHERE User="issue28534"; +authentication_string +*ED69FD0F45ED6D6D31345869E17860014701E007 +DROP USER IF EXISTS issue28534; +drop user if exists u29473; +drop role if exists r29473; +CREATE USER u29473; +CREATE ROLE r29473; +GRANT r29473 TO u29473; +GRANT CREATE USER ON *.* TO u29473; +SET ROLE r29473; +DROP ROLE r29473; +SHOW GRANTS; +Grants for User +GRANT CREATE USER ON *.* TO 'u29473'@'%' +DROP USER IF EXISTS u29473; +drop table if exists t1; +drop user if exists u1; +create table t1(id int, v int); +CREATE USER u1 require ssl; +GRANT CREATE ON executor__simple.* TO u1; +GRANT UPDATE ON executor__simple.t1 TO u1; +GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO u1; +GRANT SELECT(v), UPDATE(v) on executor__simple.t1 TO u1; +SELECT COUNT(1) FROM mysql.global_grants WHERE USER='u1' AND HOST='%'; +COUNT(1) +1 +SELECT COUNT(1) FROM mysql.global_priv WHERE USER='u1' AND HOST='%'; +COUNT(1) +1 +SELECT COUNT(1) FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'; +COUNT(1) +1 +SELECT COUNT(1) FROM mysql.columns_priv WHERE USER='u1' AND HOST='%'; +COUNT(1) +1 +SHOW GRANTS FOR u1; +Grants for u1@% +GRANT USAGE ON *.* TO 'u1'@'%' +GRANT CREATE ON `executor__simple`.* TO 'u1'@'%' +GRANT UPDATE ON `executor__simple`.`t1` TO 'u1'@'%' +GRANT SELECT(v), UPDATE(v) ON `executor__simple`.`t1` TO 'u1'@'%' +GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO 'u1'@'%' +DROP USER u1; +SHOW GRANTS FOR u1; +Error 1141 (42000): There is no such grant defined for user 'u1' on host '%' +SELECT * FROM mysql.global_grants WHERE USER='u1' AND HOST='%'; +USER HOST PRIV WITH_GRANT_OPTION +SELECT * FROM mysql.global_priv WHERE USER='u1' AND HOST='%'; +Host User Priv +SELECT * FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'; +Host DB User Table_name Grantor Timestamp Table_priv Column_priv +SELECT * FROM mysql.columns_priv WHERE USER='u1' AND HOST='%'; +Host DB User Table_name Column_name Timestamp Column_priv +DROP USER IF EXISTS u1; +drop table t1; +drop role if exists r1,r2,r3; +create role r1, r2, r3; +grant r1,r2,r3 to current_user(); +set role all; +revoke r1, r3 from root; +drop role r1; +drop role if exists r1, r2, r3; +drop table if exists ic, 
xx; +create table ic (id int primary key); +begin; +insert into ic values (0); +create table xx (id int); +select * from ic where id = 0; +id +0 +delete from ic; +rollback; +begin; +insert into ic values (1); +create user 'xx'@'127.0.0.1'; +select * from ic where id = 1; +id +1 +delete from ic; +rollback; +begin; +insert into ic values (2); +grant SELECT on executor__simple.ic to 'xx'@'127.0.0.1'; +select * from ic where id = 2; +id +2 +delete from ic; +rollback; +begin; +insert into ic values (3); +flush privileges; +select * from ic where id = 3; +id +3 +delete from ic; +rollback; +begin; +insert into ic values (4); +analyze table ic; +select * from ic where id = 4; +id +4 +delete from ic; +rollback; +DO sum(1); +DO avg(@e+@f); +DO GROUP_CONCAT(NULLIF(ELT(1, @e), 2.0) ORDER BY 1); +drop user if exists test_all; +create user test_all; +set default role all to test_all; +drop user if exists 'testflush'@'localhost'; +CREATE USER 'testflush'@'localhost' IDENTIFIED BY ''; +UPDATE mysql.User SET Select_priv='Y' WHERE User="testflush" and Host="localhost"; +SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"; +Error 1142 (42000): SELECT command denied to user 'testflush'@'localhost' for table 'user' +FLUSH PRIVILEGES; +SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"; +authentication_string + +drop user if exists 'bob'@'localhost'; +drop user if exists 'bob2'@'localhost'; +CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +Host User authentication_string plugin +localhost bob uid=bob,ou=People,dc=example,dc=com authentication_ldap_simple +CREATE USER 'bob2'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob2,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob2'; +Host User authentication_string plugin +localhost bob2 uid=bob2,ou=People,dc=example,dc=com authentication_ldap_sasl +drop user if exists 'bob'@'localhost'; +CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +Host User authentication_string plugin +localhost bob uid=bob,ou=People,dc=example,dc=com authentication_ldap_simple +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +Host User authentication_string plugin +localhost bob uid=bob,ou=Manager,dc=example,dc=com authentication_ldap_sasl +ALTER USER 'bob'@'localhost' PASSWORD HISTORY 5 +; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'; +drop user if exists u1, u2, u3, u4, u5; +set global validate_password.enable = 1; +create user u1 identified with 'tidb_auth_token'; +create user u2 identified with 'auth_socket'; +create user u3 identified with 'authentication_ldap_simple'; +create user u4 
identified with 'authentication_ldap_sasl'; +create user u5 identified with 'mysql_native_password'; +Error 1819 (HY000): Your password does not satisfy the current policy requirements (Require Password Length: 8) +create user u5 identified with 'caching_sha2_password'; +Error 1819 (HY000): Your password does not satisfy the current policy requirements (Require Password Length: 8) +create user u5 identified with 'tidb_sm3_password'; +Error 1819 (HY000): Your password does not satisfy the current policy requirements (Require Password Length: 8) +create user u5 identified with 'mysql_clear_password'; +Error 1524 (HY000): Plugin 'mysql_clear_password' is not loaded +create user u5 identified with 'tidb_session_token'; +Error 1524 (HY000): Plugin 'tidb_session_token' is not loaded +set global validate_password.enable = default; +drop role if exists 'r1'; +create role 'r1' ; +grant 'r1' to current_user(); +revoke 'r1' from current_user(); +grant 'r1' to current_user(),current_user(); +revoke 'r1' from current_user(),current_user(); +drop role 'r1' ; +drop role if exists 'targetRole'; +drop user if exists 'testRoleAdmin'; +CREATE USER 'testRoleAdmin'; +CREATE ROLE 'targetRole'; +GRANT `targetRole` TO `testRoleAdmin`; +Error 1227 (42000): Access denied; you need (at least one of) the SUPER or ROLE_ADMIN privilege(s) for this operation +GRANT SUPER ON *.* TO `testRoleAdmin`; +GRANT `targetRole` TO `testRoleAdmin`; +REVOKE `targetRole` FROM `testRoleAdmin`; +DROP USER 'testRoleAdmin'; +DROP ROLE 'targetRole'; +drop role if exists r_1, r_2, r_3, u_1; +CREATE ROLE r_1, r_2, r_3, u_1; +insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_1','%','u_1'); +insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_2','%','u_1'); +flush privileges; +SET DEFAULT ROLE r_3 TO u_1; +Error 3530 (HY000): `r_3`@`%` is not granted to u_1@% +SET DEFAULT ROLE r_1 TO u_1000; +Error 1396 (HY000): Operation SET DEFAULT ROLE failed for u_1000@% +SET DEFAULT ROLE r_1, r_3 TO u_1; +Error 3530 (HY000): `r_3`@`%` is not granted to u_1@% +SET DEFAULT ROLE r_1 TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +DEFAULT_ROLE_USER +r_1 +SET DEFAULT ROLE r_2 TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +DEFAULT_ROLE_USER +r_2 +SET DEFAULT ROLE ALL TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +DEFAULT_ROLE_USER +r_1 +r_2 +SET DEFAULT ROLE NONE TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +DEFAULT_ROLE_USER +DROP USER r_1, r_2, r_3, u_1; +drop user if exists 'issue17247'; +create user 'issue17247'; +grant CREATE USER on *.* to 'issue17247'; +ALTER USER USER() IDENTIFIED BY 'xxx'; +ALTER USER CURRENT_USER() IDENTIFIED BY 'yyy'; +ALTER USER CURRENT_USER IDENTIFIED BY 'zzz'; +ALTER USER 'issue17247'@'%' IDENTIFIED BY 'kkk'; +ALTER USER 'issue17247'@'%' IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'; +ALTER USER USER() IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'; +[parser:1064]You have an error in your SQL syntax; check the manual that corresponds to your TiDB version for the right syntax to use line 1 column 40 near "PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F';" +drop table if exists t; +do 1, @a:=1; +select @a; +@a +1 +create table t (i int); +insert into t values (1); +select * from t; +i +1 +do @a := (select * from t where i = 1); +insert into t values (2); +select * from t; +i +1 +2 +drop user if exists 
set_role_all; +create user set_role_all; +set role all; +select current_role; +current_role +NONE +drop user if exists testCreateRole; +drop role if exists test_create_role; +create user testCreateRole; +grant CREATE USER on *.* to testCreateRole; +create role test_create_role; +revoke CREATE USER on *.* from testCreateRole; +grant CREATE ROLE on *.* to testCreateRole; +drop role test_create_role; +create role test_create_role; +drop role test_create_role; +create user test_create_role; +Error 1227 (42000): Access denied; you need (at least one of) the CREATE User privilege(s) for this operation +drop user testCreateRole; +drop user if exists testCreateRole; +drop role if exists test_create_role; +create user testCreateRole; +create user test_create_role; +grant CREATE USER on *.* to testCreateRole; +drop role test_create_role; +revoke CREATE USER on *.* from testCreateRole; +create role test_create_role; +grant DROP ROLE on *.* to testCreateRole; +drop role test_create_role; +create user test_create_role; +drop user test_create_role; +Error 1227 (42000): Access denied; you need (at least one of) the CREATE USER privilege(s) for this operation +drop user testCreateRole; +SET GLOBAL tidb_enable_resource_control='on'; +drop RESOURCE GROUP if exists rg1; +drop RESOURCE GROUP if exists rg2; +drop user if exists user1; +SET RESOURCE GROUP rg1; +Error 8249 (HY000): Unknown resource group 'rg1' +CREATE RESOURCE GROUP rg1 ru_per_sec = 100; +create user user1; +ALTER USER `user1` RESOURCE GROUP `rg1`; +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +default +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +rg1 +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +default +CREATE RESOURCE GROUP rg2 ru_per_sec = 200; +SET RESOURCE GROUP `rg2`; +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +rg2 +SET RESOURCE GROUP ``; +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +default +SET RESOURCE GROUP default; +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +default +SELECT CURRENT_RESOURCE_GROUP(); +CURRENT_RESOURCE_GROUP() +rg1 +drop user user1; +SET GLOBAL tidb_enable_resource_control=default; +drop user if exists testuser; +drop user if exists testuser1; +drop user if exists testuser2; +CREATE USER testuser COMMENT '1234'; +CREATE USER testuser1 ATTRIBUTE '{"name": "Tom", "age": 19}'; +CREATE USER testuser2 ATTRIBUTE '{"name": "Tom", age: 19}'; +Error 3140 (22032): Invalid JSON text: The document root must not be followed by other values. 
+CREATE USER testuser2; +SELECT user_attributes FROM mysql.user WHERE user = 'testuser'; +user_attributes +{"metadata": {"comment": "1234"}} +SELECT user_attributes FROM mysql.user WHERE user = 'testuser1'; +user_attributes +{"metadata": {"age": 19, "name": "Tom"}} +SELECT user_attributes FROM mysql.user WHERE user = 'testuser2'; +user_attributes +{} +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser'; +attribute +{"comment": "1234"} +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 19, "name": "Tom"} +SELECT attribute->>"$.age" AS age, attribute->>"$.name" AS name FROM information_schema.user_attributes WHERE user = 'testuser1'; +age name +19 Tom +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser2'; +attribute +NULL +ALTER USER testuser1 ATTRIBUTE '{"age": 20, "sex": "male"}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "name": "Tom", "sex": "male"} +ALTER USER testuser1 ATTRIBUTE '{"hobby": "soccer"}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "hobby": "soccer", "name": "Tom", "sex": "male"} +ALTER USER testuser1 ATTRIBUTE '{"sex": null, "hobby": null}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "name": "Tom"} +ALTER USER testuser1 COMMENT '5678'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "comment": "5678", "name": "Tom"} +ALTER USER testuser1 COMMENT ''; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "comment": "", "name": "Tom"} +ALTER USER testuser1 ATTRIBUTE '{"comment": null}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +attribute +{"age": 20, "name": "Tom"} +SELECT user, host, attribute FROM information_schema.user_attributes where user in ('testuser', 'testuser1', 'testuser2') ORDER BY user; +user host attribute +testuser % {"comment": "1234"} +testuser1 % {"age": 20, "name": "Tom"} +testuser2 % NULL +SELECT user, host, user_attributes FROM mysql.user ORDER BY user; +Error 1142 (42000): SELECT command denied to user 'testuser1'@'%' for table 'user' +create user usr1@'%' identified by 'passord'; +alter user usr1 comment 'comment1'; +select user_attributes from mysql.user where user = 'usr1'; +user_attributes +{"metadata": {"comment": "comment1"}} +set global tidb_enable_resource_control = 'on'; +drop RESOURCE group if exists rg1; +CREATE RESOURCE GROUP rg1 ru_per_sec = 100; +alter user usr1 resource group rg1; +select user_attributes from mysql.user where user = 'usr1'; +user_attributes +{"metadata": {"comment": "comment1"}, "resource_group": "rg1"} +set global tidb_enable_resource_control = default; diff --git a/tests/integrationtest/r/executor/split_table.result b/tests/integrationtest/r/executor/split_table.result index 482c65b167396..4bf6b2a6ded23 100644 --- a/tests/integrationtest/r/executor/split_table.result +++ b/tests/integrationtest/r/executor/split_table.result @@ -6,3 +6,83 @@ create table t(a int(20) auto_increment primary key); split table t between (-9223372036854775808) and (9223372036854775807) regions 16; Error 1690 (22003): constant -9223372036854775808 overflows int drop table if exists t; +drop table if exists t, t1; +create table t(a varchar(100),b int, index idx1(b,a)); +split table t index 
idx1 by (10000,"abcd"),(10000000); +split table t index idx1 by ("abcd"); +Error 1265 (01000): Incorrect value: 'abcd' for column 'b' +split table t index idx1 between (0) and (1000000000) regions 10; +split table t index idx1 between (2,'a') and (1,'c') regions 10; +Error 8212 (HY000): Failed to split region ranges: Split index `idx1` region lower value (2,a) should less than the upper value (1,c) +split table t index idx1 between () and (1) regions 10; +Error 1105 (HY000): Split index `idx1` region lower value count should more than 0 +split table t index idx1 between (1) and () regions 10; +Error 1105 (HY000): Split index `idx1` region upper value count should more than 0 +split table t index idx1 between (0) and (1000000000) regions 10000; +Error 1105 (HY000): Split index region num exceeded the limit 1000 +split table t index idx1 between (0) and (1000000000) regions 0; +Error 1105 (HY000): Split index region num should more than 0 +split table t index idx1 between ("aa") and (1000000000) regions 0; +Error 1265 (01000): Incorrect value: 'aa' for column 'b' +split table t between (0) and (1000000000) regions 10; +split table t between (2) and (1) regions 10; +Error 8212 (HY000): Failed to split region ranges: lower value 2 should less than the upper value 1 +split table t between () and (1) regions 10; +Error 1105 (HY000): Split table region lower value count should be 1 +split table t between (1) and () regions 10; +Error 1105 (HY000): Split table region upper value count should be 1 +split table t between (0) and (1000000000) regions 10000; +Error 1105 (HY000): Split table region num exceeded the limit 1000 +split table t between (0) and (1000000000) regions 0; +Error 1105 (HY000): Split table region num should more than 0 +split table t between ("aa") and (1000000000) regions 10; +Error 1265 (01000): Incorrect value: 'aa' for column '_tidb_rowid' +split table t between (0) and (100) regions 10; +Error 8212 (HY000): Failed to split region ranges: the region size is too small, expected at least 1000, but got 10 +split table t by (0),(1000),(1000000); +create table t1(a int, b int); +split table t1 between(0) and (10000) regions 10; +split table t1 between(10) and (10010) regions 5; +drop table if exists t; +create table t (a int,b int) partition by hash(a) partitions 5; +split table t between (0) and (1000000) regions 5; +split region for partition table t between (1000000) and (100000000) regions 10; +split table t partition (p1,p2) between (100000000) and (1000000000) regions 5; +split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5; +set tidb_enable_clustered_index=ON; +drop table if exists t; +create table t (a varchar(255), b double, c int, primary key (a, b)); +split table t between ('aaa') and ('aaa', 100.0) regions 10; +Error 1105 (HY000): Split table region lower value count should be 2 +split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10; +Error 1105 (HY000): Split table region upper value count should be 2 +split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10; +Error 1265 (01000): Incorrect value: 'aaa' for column 'b' +split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10; +Error 8212 (HY000): Failed to split region ranges: Split table `t` region lower value (aaa,0) should less than the upper value (aaa,0) +split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10; +Error 8212 (HY000): Failed to split region ranges: Split table `t` region lower value (bbb,0) should less than the upper 
value (aaa,0) +split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000; +Error 1105 (HY000): Split table region num exceeded the limit 1000 +split table t between (null, null) and (null, null) regions 1000; +Error 1048 (23000): Column 'a' cannot be null +split table t by (null, null); +Error 1048 (23000): Column 'a' cannot be null +split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10; +split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0); +split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0); +drop table t; +create table t (a int, b int, c int, d int, primary key(a, c, d)); +split table t between (0, 0, 0) and (0, 0, 1) regions 1000; +drop table t; +create table t (a int, b int, c int, d int, primary key(d, a, c)); +split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535); +drop table if exists t; +create table t (a varchar(255), b decimal, c int, primary key (a, b)); +split table t by ('aaa', ''); +Error 1265 (01000): Incorrect value: '' for column 'b' +drop table t; +CREATE TABLE t (`id` varchar(10) NOT NULL, primary key (`id`) CLUSTERED); +split table t index `primary` between (0) and (1000) regions 2; +Error 1176 (42000): Key 'primary' doesn't exist in table 't' +set tidb_enable_clustered_index=default; diff --git a/tests/integrationtest/r/executor/stale_txn.result b/tests/integrationtest/r/executor/stale_txn.result new file mode 100644 index 0000000000000..d1e7c85a2829b --- /dev/null +++ b/tests/integrationtest/r/executor/stale_txn.result @@ -0,0 +1,38 @@ +select * from information_schema.ddl_jobs as of timestamp now(); +drop table if exists t1; +create table t1 (id int primary key, v int); +insert into t1 values(1, 10); +select sleep(0.1); +sleep(0.1) +0 +set @a=now(6); +select sleep(0.1); +sleep(0.1) +0 +update t1 set v=100 where id=1; +select * from t1 as of timestamp @a where v=(select v from t1 as of timestamp @a where id=1); +id v +1 10 +select (select v from t1 as of timestamp @a where id=1) as v; +v +10 +set tidb_txn_mode='pessimistic'; +set tx_isolation = 'READ-COMMITTED'; +drop table if exists t1; +create table t1 (id int primary key, v int); +insert into t1 values(1, 10); +select sleep(0.1); +sleep(0.1) +0 +set @a=now(6); +select sleep(0.1); +sleep(0.1) +0 +update t1 set v=100 where id=1; +set autocommit=0; +select * from t1 as of timestamp @a; +id v +1 10 +set tidb_txn_mode = default; +set tx_isolation = default; +set autocommit = default; diff --git a/tests/integrationtest/r/executor/statement_context.result b/tests/integrationtest/r/executor/statement_context.result new file mode 100644 index 0000000000000..a113da7b54585 --- /dev/null +++ b/tests/integrationtest/r/executor/statement_context.result @@ -0,0 +1,105 @@ +drop table if exists sc, sc2, sc3; +create table sc (a int); +insert sc values (1), (2); +set sql_mode = 'STRICT_TRANS_TABLES'; +select * from sc where a > cast(1.1 as decimal); +a +2 +update sc set a = 4 where a > cast(1.1 as decimal); +set sql_mode = ''; +update sc set a = 3 where a > cast(1.1 as decimal); +select * from sc; +a +1 +3 +set sql_mode = 'STRICT_TRANS_TABLES'; +delete from sc; +insert sc values ('1.8'+1); +select * from sc; +a +3 +select * from sc where a > '1x'; +a +3 +set sql_mode = ''; +update sc set a = 4 where a > '1x'; +delete from sc where a < '1x'; +select * from sc where a > '1x'; +a +4 +create table sc2 (a varchar(255)); +insert sc2 values (unhex('4040ffff')); +select @@warning_count > 0; +@@warning_count > 0 +1 +select * from sc2; +a +@@ +set sql_mode = 
'STRICT_TRANS_TABLES'; +insert sc2 values (unhex('4040ffff')); +Error 1366 (HY000): Incorrect string value '\xFF' for column 'a' +set @@tidb_skip_utf8_check = '1'; +insert sc2 values (unhex('4040ffff')); +select length(a) from sc2; +length(a) +2 +4 +set @@tidb_skip_utf8_check = '0'; +insert sc2 values ('�'); +create table sc3 (a varchar(255)) charset ascii; +set sql_mode = ''; +insert sc3 values (unhex('4040ffff')); +select @@warning_count > 0; +@@warning_count > 0 +1 +select * from sc3; +a +@@ +set sql_mode = 'STRICT_TRANS_TABLES'; +insert sc3 values (unhex('4040ffff')); +Error 1366 (HY000): Incorrect string value '\xFF\xFF' for column 'a' +set @@tidb_skip_ascii_check = '1'; +insert sc3 values (unhex('4040ffff')); +select length(a) from sc3; +length(a) +2 +4 +set @@tidb_skip_ascii_check = '0'; +insert sc3 values (unhex('4040')); +set sql_mode = ''; +drop table if exists t1; +create table t1(a varchar(100) charset utf8); +insert t1 values (unhex('f09f8c80')); +select @@warning_count > 0; +@@warning_count > 0 +1 +select * from t1; +a + +insert t1 values (unhex('4040f09f8c80')); +select @@warning_count > 0; +@@warning_count > 0 +1 +select * from t1; +a + +@@ +select length(a) from t1; +length(a) +0 +2 +set sql_mode = 'STRICT_TRANS_TABLES'; +insert t1 values (unhex('f09f8c80')); +Error 1366 (HY000): Incorrect string value '\xF0\x9F\x8C\x80' for column 'a' +insert t1 values (unhex('F0A48BAE')); +Error 1366 (HY000): Incorrect string value '\xF0\xA4\x8B\xAE' for column 'a' +set global tidb_check_mb4_value_in_utf8 = false; +insert t1 values (unhex('f09f8c80')); +set global tidb_check_mb4_value_in_utf8 = true; +insert t1 values (unhex('F0A48BAE')); +Error 1366 (HY000): Incorrect string value '\xF0\xA4\x8B\xAE' for column 'a' +drop table if exists t1; +set global tidb_check_mb4_value_in_utf8 = default; +set sql_mode = default; +set @@tidb_skip_ascii_check = default; +set @@tidb_skip_utf8_check = default; diff --git a/tests/integrationtest/r/executor/update.result b/tests/integrationtest/r/executor/update.result new file mode 100644 index 0000000000000..5accbf8f0f70e --- /dev/null +++ b/tests/integrationtest/r/executor/update.result @@ -0,0 +1,386 @@ +drop table if exists t; +create table t(a bigint, b bigint as (a+1)); +begin; +insert into t(a) values(1); +update t set b=6 where b=2; +Error 3105 (HY000): The value specified for generated column 'b' in table 't' is not allowed. 
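+# NOTE (editorial annotation, not part of the recorded test output): error 3105 above is expected because `b` is a generated column, which may only be assigned explicitly via DEFAULT. Assuming MySQL-compatible behaviour (an assumption, not shown by this test), a form that should be accepted is:
+#   update t set b = default where b = 2;
+# which keeps `b` computed from its generation expression (a+1).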
+commit; +select * from t; +a b +1 2 +drop table if exists t1, t2, t3; +create table t1(id int primary key auto_increment, n int); +create table t2(id int primary key, n float auto_increment, key I_n(n)); +create table t3(id int primary key, n double auto_increment, key I_n(n)); +insert into t1 set n = 1; +select * from t1 where id = 1; +id n +1 1 +update t1 set id = id+1; +select * from t1 where id = 2; +id n +2 1 +insert into t1 set n = 2; +select * from t1 where id = 3; +id n +3 2 +update t1 set id = id + '1.1' where id = 3; +select * from t1 where id = 4; +id n +4 2 +insert into t1 set n = 3; +select * from t1 where id = 5; +id n +5 3 +update t1 set id = id + '0.5' where id = 5; +select * from t1 where id = 6; +id n +6 3 +insert into t1 set n = 4; +select * from t1 where id = 7; +id n +7 4 +insert into t2 set id = 1; +select * from t2 where id = 1; +id n +1 1 +update t2 set n = n+1; +select * from t2 where id = 1; +id n +1 2 +insert into t2 set id = 2; +select * from t2 where id = 2; +id n +2 3 +update t2 set n = n + '2.2'; +select * from t2 where id = 2; +id n +2 5.2 +insert into t2 set id = 3; +select * from t2 where id = 3; +id n +3 6 +update t2 set n = n + '0.5' where id = 3; +select * from t2 where id = 3; +id n +3 6.5 +insert into t2 set id = 4; +select * from t2 where id = 4; +id n +4 7 +insert into t3 set id = 1; +select * from t3 where id = 1; +id n +1 1 +update t3 set n = n+1; +select * from t3 where id = 1; +id n +1 2 +insert into t3 set id = 2; +select * from t3 where id = 2; +id n +2 3 +update t3 set n = n + '3.3'; +select * from t3 where id = 2; +id n +2 6.3 +insert into t3 set id = 3; +select * from t3 where id = 3; +id n +3 7 +update t3 set n = n + '0.5' where id = 3; +select * from t3 where id = 3; +id n +3 7.5 +insert into t3 set id = 4; +select * from t3 where id = 4; +id n +4 8 +drop table if exists t; +drop database if exists test2; +create database test2; +create table t(a int, b int generated always as (a+1) virtual); +create table test2.t(a int, b int generated always as (a+1) virtual); +update t, test2.t set executor__update.t.a=1; +drop database test2; +drop table if exists t1, t2; +create table t1 (c_str varchar(40)); +create table t2 (c_str varchar(40)); +insert into t1 values ('Alice'); +insert into t2 values ('Bob'); +select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str; +c_str c_str +Alice Bob +update t1, t2 set t1.c_str = t2.c_str, t2.c_str = t1.c_str where t1.c_str <= t2.c_str; +select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str; +c_str c_str +drop table if exists t; +create table t (a int, b int); +insert into t values(1, 2); +select * from t; +a b +1 2 +update t set a=b, b=a; +select * from t; +a b +2 1 +drop table if exists t; +create table t (a int, b int); +insert into t values (1,3); +select * from t; +a b +1 3 +update t set a=b, b=a; +select * from t; +a b +3 1 +drop table if exists t; +create table t (a int, b int, c int as (-a) virtual, d int as (-b) stored); +insert into t(a, b) values (10, 11), (20, 22); +select * from t; +a b c d +10 11 -10 -11 +20 22 -20 -22 +update t set a=b, b=a; +select * from t; +a b c d +11 10 -11 -10 +22 20 -22 -20 +update t set b=30, a=b; +select * from t; +a b c d +10 30 -10 -30 +20 30 -20 -30 +drop table if exists t; +create table t(x int, y int); +insert into t values(); +update t t1, t t2 set t2.y=1, t1.x=2; +select * from t; +x y +2 1 +update t t1, t t2 set t1.x=t2.y, t2.y=t1.x; +select * from t; +x y +1 2 +drop table if exists t; +create table t(x int, y int, z int as (x+10) stored, w 
int as (y-10) virtual); +insert into t(x, y) values(1, 2), (3, 4); +update t t1, t t2 set t2.y=1, t1.x=2 where t1.x=1; +select * from t; +x y z w +2 1 12 -9 +3 1 13 -9 +update t t1, t t2 set t1.x=5, t2.y=t1.x where t1.x=3; +select * from t; +x y z w +2 3 12 -7 +5 3 15 -7 +drop table if exists t; +create table t(a int, b int, c int as (a+b) stored); +insert into t(a, b) values (1, 2); +update t t1, t t2 set t2.a=3; +select * from t; +a b c +3 2 5 +update t t1, t t2 set t1.a=4, t2.b=5; +select * from t; +a b c +4 5 9 +drop table if exists t; +create table t (a int primary key); +insert into t values (1), (2); +update t set a=a+2; +select * from t; +a +3 +4 +update t m, t n set m.a = n.a+10 where m.a=n.a; +select * from t; +a +13 +14 +drop table if exists t; +create table t (a int primary key, b int); +insert into t values (1,3), (2,4); +update t m, t n set m.a = n.a+10, n.b = m.b+1 where m.a=n.a; +Error 1706 (HY000): Primary key/partition key update is not allowed since the table is updated both as 'm' and 'n'. +drop table if exists t; +create table t (a int, b int, c int, primary key(a, b)); +insert into t values (1,3,5), (2,4,6); +update t m, t n set m.a = n.a+10, m.b = n.b+10 where m.a=n.a; +select * from t; +a b c +11 13 5 +12 14 6 +update t m, t n, t q set q.c=m.a+n.b, n.c = m.a+1, m.c = n.b+1 where m.b=n.b AND m.a=q.a; +select * from t; +a b c +11 13 24 +12 14 26 +update t m, t n, t q set m.a = m.a+1, n.c = n.c-1, q.c = q.a+q.b where m.b=n.b and n.b=q.b; +Error 1706 (HY000): Primary key/partition key update is not allowed since the table is updated both as 'm' and 'n'. +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(id varchar(200) primary key, v int); +insert into t(id, v) values ('abc', 233); +select id, v from t where id = 'abc'; +id v +abc 233 +update t set id = 'dfg' where id = 'abc'; +select * from t; +id v +dfg 233 +update t set id = 'aaa', v = 333 where id = 'dfg'; +select * from t where id = 'aaa'; +id v +aaa 333 +update t set v = 222 where id = 'aaa'; +select * from t where id = 'aaa'; +id v +aaa 222 +insert into t(id, v) values ('bbb', 111); +update t set id = 'bbb' where id = 'aaa'; +Error 1062 (23000): Duplicate entry 'bbb' for key 't.PRIMARY' +drop table if exists ut3pk; +create table ut3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into ut3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111); +select id1, id2, id3, v from ut3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +id1 id2 id3 v +aaa bbb 111 233 +update ut3pk set id1 = 'abc', id2 = 'bbb2', id3 = 222, v = 555 where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +select id1, id2, id3, v from ut3pk where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +id1 id2 id3 v +abc bbb2 222 555 +select id1, id2, id3, v from ut3pk; +id1 id2 id3 v +abc bbb2 222 555 +update ut3pk set v = 666 where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +select id1, id2, id3, v from ut3pk; +id1 id2 id3 v +abc bbb2 222 666 +insert into ut3pk(id1, id2, id3, v) values ('abc', 'bbb3', 222, 777); +update ut3pk set id2 = 'bbb3' where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +Error 1062 (23000): Duplicate entry 'abc-bbb3-222' for key 'ut3pk.PRIMARY' +drop table if exists ut1pku; +create table ut1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into ut1pku(id, uk, v) values('a', 1, 2), ('b', 2, 3); +select * from ut1pku; +id uk v +a 1 2 +b 2 3 +update ut1pku set uk = 3 where id = 'a'; +select * from ut1pku; +id uk v +a 3 2 +b 2 3 +update ut1pku 
set uk = 2 where id = 'a'; +Error 1062 (23000): Duplicate entry '2' for key 'ut1pku.ukk' +select * from ut1pku; +id uk v +a 3 2 +b 2 3 +drop table if exists t; +create table t(a char(10) primary key, b char(10)); +insert into t values('a', 'b'); +update t set a='c' where t.a='a' and b='b'; +select * from t; +a b +c b +drop table if exists s; +create table s (a int, b int, c int, primary key (a, b)); +insert s values (3, 3, 3), (5, 5, 5); +update s set c = 10 where a = 3; +select * from s; +a b c +3 3 10 +5 5 5 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(id varchar(200) primary key, v int); +insert into t(id, v) values ('abc', 233); +delete from t where id = 'abc'; +select * from t; +id v +select * from t where id = 'abc'; +id v +drop table if exists it3pk; +create table it3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111); +delete from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +select * from it3pk; +id1 id2 v id3 +select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +id1 id2 v id3 +insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 433, 111); +select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +id1 id2 v id3 +aaa bbb 433 111 +drop table if exists dt3pku; +create table dt3pku(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt3pku(id, uk, v) values('a', 1, 2); +delete from dt3pku where id = 'a'; +select * from dt3pku; +id uk v +insert into dt3pku(id, uk, v) values('a', 1, 2); +drop table if exists s1; +create table s1 (a int, b int, c int, primary key (a, b)); +insert s1 values (3, 3, 3), (5, 5, 5); +delete from s1 where a = 3; +select * from s1; +a b c +5 5 5 +set tidb_enable_clustered_index = default; +set tidb_enable_clustered_index = on; +drop table if exists rt1pk; +create table rt1pk(id varchar(200) primary key, v int); +replace into rt1pk(id, v) values('abc', 1); +select * from rt1pk; +id v +abc 1 +replace into rt1pk(id, v) values('bbb', 233), ('abc', 2); +select * from rt1pk; +id v +abc 2 +bbb 233 +drop table if exists rt3pk; +create table rt3pk(id1 timestamp, id2 time, v int, id3 year, primary key(id1, id2, id3)); +replace into rt3pk(id1, id2,id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 1); +select * from rt3pk; +id1 id2 v id3 +2018-01-01 11:11:11 22:22:22 1 2019 +replace into rt3pk(id1, id2, id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 2); +select * from rt3pk; +id1 id2 v id3 +2018-01-01 11:11:11 22:22:22 2 2019 +drop table if exists rt1pk1u; +create table rt1pk1u(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +replace into rt1pk1u(id, uk, v) values("abc", 2, 1); +select * from rt1pk1u; +id uk v +abc 2 1 +replace into rt1pk1u(id, uk, v) values("aaa", 2, 11); +select * from rt1pk1u; +id uk v +aaa 2 11 +set tidb_enable_clustered_index = default; +drop table if exists t; +create table t(ts int(10) unsigned NULL DEFAULT NULL); +insert into t values(1); +update t set ts = IF(ts < (0 - ts), 1,1) where ts>0; +Error 1690 (22003): BIGINT UNSIGNED value is out of range in '(0 - executor__update.t.ts)' +drop table if exists tt; +create table tt (m0 varchar(64), status tinyint not null); +insert into tt values('1',0),('1',0),('1',0); +update tt a inner join (select m0 from tt where status!=1 group by m0 having count(*)>1) b on a.m0=b.m0 set a.status=1; +drop table if exists t1; +create 
table t1(id int, a int unsigned); +set sql_mode=''; +insert into t1 values(1, 10), (2, 20); +update t1 set a='-1' where id=1; +update t1 set a='1000000000000000000' where id=2; +select id, a from t1 order by id asc; +id a +1 0 +2 4294967295 +set sql_mode=default; diff --git a/tests/integrationtest/r/executor/window.result b/tests/integrationtest/r/executor/window.result new file mode 100644 index 0000000000000..967ec034c8692 --- /dev/null +++ b/tests/integrationtest/r/executor/window.result @@ -0,0 +1,36 @@ +drop table if exists tbl_2; +create table tbl_2 ( col_10 char(65) collate utf8mb4_unicode_ci not null , col_11 bigint not null , col_12 datetime not null , col_13 bigint unsigned default 327695751717730004 , col_14 timestamp default '2010-11-18' not null , primary key idx_5 ( col_11,col_13 ) /*T![clustered_index] clustered */ , unique key idx_6 ( col_10,col_11,col_13 ) , unique key idx_7 ( col_14,col_12,col_13 ) ); +insert into tbl_2 values ( 'RmF',-5353757041350034197,'1996-01-22',1866803697729291364,'1996-09-11' ); +insert into tbl_2 values ( 'xEOGaB',-6602924241498980347,'2019-02-22',8297270320597030697,'1972-04-04' ); +insert into tbl_2 values ( 'dvUztqgTPAhLdzgEsV',3316448219481769821,'2034-09-12',937089564901142512,'2030-12-04' ); +insert into tbl_2 values ( 'mNoyfbT',-6027094365061219400,'2035-10-10',1752804734961508175,'1992-08-09' ); +insert into tbl_2 values ( 'BDPJMhLYXuKB',6823702503458376955,'2015-04-09',737914379167848827,'2026-04-29' ); +insert into tbl_2 values ( 'WPiaVfPstGohvHd',1308183537252932688,'2020-05-03',5364104746649397703,'1979-01-28' ); +insert into tbl_2 values ( 'lrm',4642935044097656317,'1973-04-29',149081313305673035,'2013-02-03' ); +insert into tbl_2 values ( '',-7361040853169906422,'2024-10-22',6308270832310351889,'1981-02-01' ); +insert into tbl_2 values ( 'uDANahGcLwpSssabD',2235074865448210231,'1992-10-10',7140606140672586593,'1992-11-25' ); +insert into tbl_2 values ( 'TDH',-1911014243756021618,'2013-01-26',2022218243939205750,'1982-04-04' ); +select lead(col_13,1,NULL) over w from tbl_2 window w as (order by col_13); +lead(col_13,1,NULL) over w +737914379167848827 +937089564901142512 +1752804734961508175 +1866803697729291364 +2022218243939205750 +5364104746649397703 +6308270832310351889 +7140606140672586593 +8297270320597030697 +NULL +drop table if exists t_tir89b, t_vejdy; +CREATE TABLE `t_tir89b` (`c_3pcik` int(11) DEFAULT NULL,`c_0b6nxb` text DEFAULT NULL,`c_qytrlc` double NOT NULL,`c_sroc_c` int(11) DEFAULT NULL,PRIMARY KEY (`c_qytrlc`) /*T![clustered_index] NONCLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +INSERT INTO t_tir89b VALUES (66,'cjd1o',87.77,NULL),(134217728,'d_unpd',76.66,NULL),(50,'_13gs',1.46,32),(49,'xclvsc',64.7,48),(7,'1an13',70.86,7),(29,NULL,6.26,6),(8,'hc485b',47.44,2),(84,'d_nlmd',99.3,76),(14,'lbny1c',61.1,47),(45,'9r5bid',25.37,95),(49,'jbz5r',72.99,49),(18,'uode3d',7.21,992),(-8945040,'ftrtib',47.47,20),(29,'algrj',6.28,24),(96,NULL,67.83,24),(5,'s1gfz',89.18,78),(74,'ggqbl',83.89,68),(61,'5n1q7',26.92,6),(10,'4gflb',33.84,28),(48,'xoe0cd',84.71,77),(6,'xkh6i',53.83,19),(5,NULL,89.1,46),(49,'4q6nx',31.5,384),(1,'pgs1',66.8,77),(19,'lltflc',33.49,63),(87,'vd4htc',39.92,-5367008),(47,NULL,28.3,10),(29,'15jqfc',100.11,64),(45,'ii6pm',52.41,61),(0,NULL,85.27,19),(104,'ikpxnb',40.66,955),(40,'gzryzd',36.23,42),(18,'7UPNE',84.27,14),(32,NULL,84.8,53),(51,'2c5lfb',18.98,74),(97,NULL,22.89,6),(70,'guyzyc',96.29,89),(34,'dvdoqb',53.82,1),(94,'6eop6b',81.77,90),(42,'p7vsnd',62.54,NULL); +CREATE TABLE 
`t_vejdy` (`c_iovir` int(11) NOT NULL,`c_r_mw3d` double DEFAULT NULL,`c_uxhghb` int(11) DEFAULT NULL,`c_rb7otb` int(11) NOT NULL,`c_dplyac` int(11) DEFAULT NULL,`c_lmcqed` double DEFAULT NULL,`c_ayaoed` text DEFAULT NULL,`c__zbqr` int(11) DEFAULT NULL,PRIMARY KEY (`c_iovir`,`c_rb7otb`) /*T![clustered_index] NONCLUSTERED */,KEY `t_e1ejcd` (`c_uxhghb`),KEY `t_o6ui_b` (`c_iovir`,`c_r_mw3d`,`c_uxhghb`,`c_rb7otb`,`c_dplyac`,`c_lmcqed`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +INSERT INTO t_vejdy VALUES (49,100.11,68,57,44,17.93,NULL,84),(38,56.91,78,30,0,53.28,'cjd1o',2),(6,NULL,NULL,88,81,93.47,'0jftkb',54),(73,91.51,31,82,3,38.12,'buesob',40),(7,26.73,7,78,9,NULL,'fd5kgd',49),(80,70.57,4,47,43,25.59,'glpoq',44),(79,94.16,15,0,0,79.55,'0ok94d',56),(58,NULL,50,69,2,65.46,'sm6rj',29),(41472,6.51,70,1080,100,43.18,'fofk4c',43),(0,6.2,57,97,2,56.17,'zqpzq',56),(72,76.66,97,88,95,75.47,'hikxqb',34),(27,1.11,134217728,57,25,NULL,'4gflb',0),(64,NULL,47,69,6,72.5,'w7jmhd',45),(-134217679,88.74,33,82,85,59.89,NULL,26),(59,97.98,37,28,33,61.1,'xioxdd',45),(6,47.31,0,0,-19,38.77,'uxmdlc',17),(82,28.62,36,70,39,11.79,'zzi8cc',2),(33,37.3,55,86,69,60.56,'mn_xx',0),(7,NULL,80,0,17,59.79,'5n1q7',97),(88,50.81,15,30,63,25.37,'ordwed',29),(48,4.32,90,48,38,84.62,'lclx',32),(10,NULL,95,75,1,21.64,NULL,85),(62,NULL,0,30,10,NULL,'7bacud',5),(50,38.81,6,0,6,64.28,'gpibn',57),(1,46.8,21,32,46,33.38,NULL,6),(29,NULL,38,7,91,31.5,'pdzdl',24),(54,6.26,1,85,22,75.63,'gl4_7',29),(1,90.37,63,63,6,61.2,'wvw23b',86),(47,NULL,82,73,0,95.79,'uipcf',NULL),(46,48.1,37,6,1,52.33,'gthpic',0),(41,75.1,7,44,5,84.16,'fe_e5',58),(43,87.71,81,32,28,91.98,'9e5nvc',66),(20,58.21,88,75,92,43.64,'kagroc',66),(91,52.75,22,14,80,NULL,'\'_YN6MD\'',6),(72,94.83,0,49,5,57.82,NULL,23),(7,100.11,0,92,13,6.28,NULL,0); +begin; +delete from t_tir89b where t_tir89b.c_3pcik >= t_tir89b.c_sroc_c; +select * from (select count(*) over (partition by ref_0.c_0b6nxb order by ref_0.c_3pcik) as c0 from t_tir89b as ref_0) as subq_0 where subq_0.c0 <> 1; +c0 +2 +3 +commit; diff --git a/tests/integrationtest/r/expression/charset_and_collation.result b/tests/integrationtest/r/expression/charset_and_collation.result index 551c015572d6f..0ef76c5943d6b 100644 --- a/tests/integrationtest/r/expression/charset_and_collation.result +++ b/tests/integrationtest/r/expression/charset_and_collation.result @@ -745,6 +745,9 @@ a b c d e f g h i 1 啊 啊 啊 啊 啊 啊 🐸 🐸 admin check table t; +admin recover index t a; +ADDED_COUNT SCAN_COUNT +0 1 alter table t add column n char(10) COLLATE utf8mb4_unicode_ci; alter table t add index n(n); update t set n = '吧'; diff --git a/tests/integrationtest/r/expression/json.result b/tests/integrationtest/r/expression/json.result index 8a1472456f31d..243a8388b0ab9 100644 --- a/tests/integrationtest/r/expression/json.result +++ b/tests/integrationtest/r/expression/json.result @@ -602,3 +602,18 @@ json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to last]') select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]'); json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]') [1, 2, 3] +drop table if exists t; +create table t (a json); +insert into t values ('"-1"'); +insert into t values ('"18446744073709551615"'); +insert into t values ('"18446744073709552000"'); +select a, cast(a as unsigned) from t; +a cast(a as unsigned) +"-1" 18446744073709551615 +"18446744073709551615" 18446744073709551615 +"18446744073709552000" 18446744073709551615 +select a, cast(a as signed) from t; +a cast(a as signed) +"-1" -1 +"18446744073709551615" -1 
+"18446744073709552000" -1 diff --git a/tests/integrationtest/r/infoschema/infoschema.result b/tests/integrationtest/r/infoschema/infoschema.result index 2d704df1dcc23..afae9899d9d66 100644 --- a/tests/integrationtest/r/infoschema/infoschema.result +++ b/tests/integrationtest/r/infoschema/infoschema.result @@ -25,6 +25,7 @@ select count(length(query)) from information_schema.ddl_jobs; count(length(query)) x drop table if EXISTS t1; +drop table if EXISTS mysql.t1, mysql.t2, mysql.t3; create table infoschema__infoschema.t1 (id int primary key, a text); insert infoschema__infoschema.t1 values(1,'334'),(4,'3443435'),(5,'fdf43t536653'); rename table infoschema__infoschema.t1 to mysql.t1; diff --git a/tests/integrationtest/r/planner/core/issuetest/planner_issue.result b/tests/integrationtest/r/planner/core/issuetest/planner_issue.result index 054e04292eaab..5d95672abacb7 100644 --- a/tests/integrationtest/r/planner/core/issuetest/planner_issue.result +++ b/tests/integrationtest/r/planner/core/issuetest/planner_issue.result @@ -121,3 +121,62 @@ select /*+ use_index_merge( tbl_39) */ col_239 from tbl_39 where not( tbl_39.c col_239 1994 1997 +drop table if exists t, t1, t2; +create table t (id int,name varchar(10)); +insert into t values(1,'tt'); +create table t1(id int,name varchar(10),name1 varchar(10),name2 varchar(10)); +insert into t1 values(1,'tt','ttt','tttt'),(2,'dd','ddd','dddd'); +create table t2(id int,name varchar(10),name1 varchar(10),name2 varchar(10),`date1` date); +insert into t2 values(1,'tt','ttt','tttt','2099-12-31'),(2,'dd','ddd','dddd','2099-12-31'); +WITH bzzs AS ( +SELECT +count(1) AS bzn +FROM +t c +), +tmp1 AS ( +SELECT +t1.* +FROM +t1 +LEFT JOIN bzzs ON 1 = 1 +WHERE +name IN ('tt') +AND bzn <> 1 +), +tmp2 AS ( +SELECT +tmp1.*, +date('2099-12-31') AS endate +FROM +tmp1 +), +tmp3 AS ( +SELECT +* +FROM +tmp2 +WHERE +endate > CURRENT_DATE +UNION ALL +SELECT +'1' AS id, +'ss' AS name, +'sss' AS name1, +'ssss' AS name2, +date('2099-12-31') AS endate +FROM +bzzs t1 +WHERE +bzn = 1 +) +SELECT +c2.id, +c3.id +FROM +t2 db +LEFT JOIN tmp3 c2 ON c2.id = '1' +LEFT JOIN tmp3 c3 ON c3.id = '1'; +id id +1 1 +1 1 diff --git a/tests/integrationtest/r/planner/core/tests/prepare/issue.result b/tests/integrationtest/r/planner/core/tests/prepare/issue.result index 34c3af078411e..d56e2ab7c6ca8 100644 --- a/tests/integrationtest/r/planner/core/tests/prepare/issue.result +++ b/tests/integrationtest/r/planner/core/tests/prepare/issue.result @@ -287,6 +287,8 @@ count(*) select @@last_plan_from_cache; @@last_plan_from_cache 0 +set tidb_enable_prepared_plan_cache=DEFAULT; +set @@tidb_enable_collect_execution_info=DEFAULT; set tidb_enable_prepared_plan_cache=1; set tidb_enable_clustered_index=on; drop table if exists PK_TCOLLATION10197; diff --git a/tests/integrationtest/r/privilege/privileges.result b/tests/integrationtest/r/privilege/privileges.result index 878644ddaecbf..f34df558730dd 100644 --- a/tests/integrationtest/r/privilege/privileges.result +++ b/tests/integrationtest/r/privilege/privileges.result @@ -30,6 +30,7 @@ Error 1142 (42000): DELETE command denied to user 'testnotexist'@'localhost' for DELETE FROM dbnotexists.t1; Error 1142 (42000): DELETE command denied to user 'testnotexist'@'localhost' for table 't1' drop table t1; +drop user if exists u1; CREATE USER u1; CREATE TABLE routine_table (a int); GRANT CREATE ROUTINE on privilege__privileges.* to u1; @@ -156,6 +157,7 @@ DROP USER lock_tables_user; CREATE USER 'nofile'@'localhost'; select 1 into outfile 
'/tmp/doesntmatter-no-permissions'; Error 1227 (42000): Access denied; you need (at least one of) the FILE privilege(s) for this operation +drop user if exists u4, 'hasgrant', 'withoutgrant'; CREATE USER 'hasgrant'; CREATE USER 'withoutgrant'; GRANT ALL ON *.* TO 'hasgrant'; @@ -231,6 +233,8 @@ prepare s from 'select * from privilege__privileges.t'; execute s; a 1 +drop user if exists tcd1, tcd2, tcd3, usr1; +drop resource group if exists rg1; CREATE USER tcd1, tcd2; GRANT ALL ON *.* to tcd2 WITH GRANT OPTION; CREATE USER acdc; @@ -583,3 +587,4 @@ GRANT SELECT ON `test`.* TO 'joe'@'%' GRANT UPDATE ON `role`.* TO 'joe'@'%' GRANT SELECT,DELETE ON `mysql`.`user` TO 'joe'@'%' GRANT 'admins'@'%', 'engineering'@'%', 'otherrole'@'%' TO 'joe'@'%' +set global tidb_enable_resource_control = default; diff --git a/tests/integrationtest/run-tests.sh b/tests/integrationtest/run-tests.sh index 40e05b91cab38..9906318f60819 100755 --- a/tests/integrationtest/run-tests.sh +++ b/tests/integrationtest/run-tests.sh @@ -78,9 +78,9 @@ function build_tidb_server() echo "building tidb-server binary: $tidb_server" rm -rf $tidb_server if [ "${TIDB_TEST_STORE_NAME}" = "tikv" ]; then - GO111MODULE=on go build -o $tidb_server github.com/pingcap/tidb/tidb-server + GO111MODULE=on go build -o $tidb_server github.com/pingcap/tidb/cmd/tidb-server else - GO111MODULE=on go build -race -o $tidb_server github.com/pingcap/tidb/tidb-server + GO111MODULE=on go build -race -o $tidb_server github.com/pingcap/tidb/cmd/tidb-server fi } @@ -264,7 +264,6 @@ enabled_new_collation="" if [[ $collation_opt = 0 || $collation_opt = 2 ]]; then enabled_new_collation=0 start_tidb_server - sleep 5 run_mysql_tester kill -15 $SERVER_PID while ps -p $SERVER_PID > /dev/null; do @@ -276,7 +275,6 @@ fi if [[ $collation_opt = 1 || $collation_opt = 2 ]]; then enabled_new_collation=1 start_tidb_server - sleep 5 run_mysql_tester kill -15 $SERVER_PID while ps -p $SERVER_PID > /dev/null; do diff --git a/tests/integrationtest/t/ddl/db_partition.test b/tests/integrationtest/t/ddl/db_partition.test index ea88dfa63b86e..3f8389edbdac8 100644 --- a/tests/integrationtest/t/ddl/db_partition.test +++ b/tests/integrationtest/t/ddl/db_partition.test @@ -851,6 +851,9 @@ PARTITION BY RANGE COLUMNS(d) -- error 1736 alter table t1p exchange partition p202307 with table t1; insert into t1 values ("2023-08-06","0000"); +drop table t1, t1p; +drop placement policy rule1; +drop placement policy rule2; # TestTiDBEnableExchangePartition drop table if exists pt; diff --git a/tests/integrationtest/t/executor/adapter.test b/tests/integrationtest/t/executor/adapter.test new file mode 100644 index 0000000000000..05fa9a2a50b6f --- /dev/null +++ b/tests/integrationtest/t/executor/adapter.test @@ -0,0 +1,9 @@ +# TestQueryTime +set @a = now(6); +drop table if exists t; +create table t(a int); +insert into t values (1), (1), (1), (1), (1); +select * from t t1 join t t2 on t1.a = t2.a; +## should be less than 1 second +select timestampdiff(microsecond, @a, now(6)) < 1000000; + diff --git a/tests/integrationtest/t/executor/admin.test b/tests/integrationtest/t/executor/admin.test index 57605b8d08ec3..fcb9eeb5b5065 100644 --- a/tests/integrationtest/t/executor/admin.test +++ b/tests/integrationtest/t/executor/admin.test @@ -84,3 +84,145 @@ drop table if exists t; create table t(a bigint unsigned primary key, b int, c int, index idx(a, b)); insert into t values(1, 1, 1), (9223372036854775807, 2, 2); admin check index t idx; + +# TestAdminCheckTable +drop table if exists test_null; +CREATE TABLE 
test_null ( + a int(11) NOT NULL, + c int(11) NOT NULL, + PRIMARY KEY (a, c), + KEY idx_a (a) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin; +insert into test_null(a, c) values(2, 2); +ALTER TABLE test_null ADD COLUMN b int NULL DEFAULT '1795454803' AFTER a; +ALTER TABLE test_null add index b(b); +ADMIN CHECK TABLE test_null; +drop table if exists test; +create table test ( + a time, + PRIMARY KEY (a) +); +insert into test set a='12:10:36'; +admin check table test; +drop table if exists test; +CREATE TABLE test ( a decimal, PRIMARY KEY (a)); +insert into test set a=10; +admin check table test; +drop table if exists test; +create table test ( a TIMESTAMP, primary key(a) ); +insert into test set a='2015-08-10 04:18:49'; +admin check table test; +drop table if exists test; +create table test ( + a int not null, + c int not null, + primary key (a, c), +key idx_a (a)) partition by range (c) ( + partition p1 values less than (1), + partition p2 values less than (4), + partition p3 values less than (7), + partition p4 values less than (11)); +insert into test values (1, 1); +insert into test values (2, 2); +insert into test values (3, 3); +insert into test values (4, 4); +insert into test values (5, 5); +insert into test values (6, 6); +insert into test values (7, 7); +insert into test values (8, 8); +insert into test values (9, 9); +insert into test values (10, 10); +admin check table test; +drop table if exists test; +create table test ( b json , c int as (JSON_EXTRACT(b,'$.d')), index idxc(c)); +INSERT INTO test set b='{"d": 100}'; +admin check table test; +drop table if exists t; +CREATE TABLE t ( + ID CHAR(32) NOT NULL, + name CHAR(32) NOT NULL, + value CHAR(255), + INDEX indexIDname (ID(8),name(8)) +); +INSERT INTO t VALUES ('keyword','urlprefix','text/ /text'); +admin check table t; +use mysql; +admin check table executor__admin.t; +-- error 1146 +admin check table t; +use executor__admin; +drop table if exists t1; +CREATE TABLE t1 (c2 YEAR, PRIMARY KEY (c2)); +INSERT INTO t1 SET c2 = '1912'; +ALTER TABLE t1 ADD COLUMN c3 TIMESTAMP NULL DEFAULT '1976-08-29 16:28:11'; +ALTER TABLE t1 ADD COLUMN c4 DATE NULL DEFAULT '1976-08-29'; +ALTER TABLE t1 ADD COLUMN c5 TIME NULL DEFAULT '16:28:11'; +ALTER TABLE t1 ADD COLUMN c6 YEAR NULL DEFAULT '1976'; +ALTER TABLE t1 ADD INDEX idx1 (c2, c3,c4,c5,c6); +ALTER TABLE t1 ADD INDEX idx2 (c2); +ALTER TABLE t1 ADD INDEX idx3 (c3); +ALTER TABLE t1 ADD INDEX idx4 (c4); +ALTER TABLE t1 ADD INDEX idx5 (c5); +ALTER TABLE t1 ADD INDEX idx6 (c6); +admin check table t1; +drop table if exists td1; +CREATE TABLE td1 (c2 INT NULL DEFAULT '70'); +INSERT INTO td1 SET c2 = '5'; +ALTER TABLE td1 ADD COLUMN c4 DECIMAL(12,8) NULL DEFAULT '213.41598062'; +ALTER TABLE td1 ADD INDEX id2 (c4) ; +ADMIN CHECK TABLE td1; +drop table if exists t1; +create table t1 (a int); +insert into t1 set a=2; +alter table t1 add column b timestamp not null; +alter table t1 add index(b); +admin check table t1; +drop table if exists t1; +create table t1 (a decimal(2,1), index(a)); +insert into t1 set a='1.9'; +alter table t1 modify column a decimal(3,2); +delete from t1; +admin check table t1; + +# TestAdminCheckIndexRange +drop table if exists check_index_test; +create table check_index_test (a int, b varchar(10), index a_b (a, b), index b (b)); +insert check_index_test values (3, "ab"),(2, "cd"),(1, "ef"),(-1, "hi"); +admin check index check_index_test a_b (2, 4); +admin check index check_index_test a_b (3, 5); +use mysql; +admin check index executor__admin.check_index_test a_b (2, 
3), (4, 5); +use executor__admin; + +# TestAdminCheckIndex +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL); +admin check index admin_test c1; +admin check index admin_test c2; +drop table if exists admin_test; +## for hash partition +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) partition by hash(c2) partitions 5; +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL); +admin check index admin_test c1; +admin check index admin_test c2; +drop table if exists admin_test; +## for range partition +create table admin_test (c1 int, c2 int, c3 int default 1, index (c1), unique key(c2)) PARTITION BY RANGE ( c2 ) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (10), + PARTITION p2 VALUES LESS THAN (MAXVALUE)); +insert admin_test (c1, c2) values (1, 1), (2, 2), (5, 5), (10, 10), (11, 11), (NULL, NULL); +admin check index admin_test c1; +admin check index admin_test c2; + +# TestAdminCheckTableErrorLocateBigTable +drop table if exists admin_test; +create table admin_test (c1 int, c2 int, primary key(c1), key(c2)); +set cte_max_recursion_depth=100000; +insert into admin_test with recursive cte(a, b) as (select 1, 1 union select a+1, b+1 from cte where cte.a< 100000) select * from cte; +--disable_result_log +select /*+ read_from_storage(tikv[`executor__admin`.`admin_test`]) */ bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024), count(*) from `executor__admin`.`admin_test` use index() where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024); +select bit_xor(crc32(md5(concat_ws(0x2, `c1`, `c2`)))), ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024), count(*) from `executor__admin`.`admin_test` use index(`c2`) where 0 = 0 group by ((cast(crc32(md5(concat_ws(0x2, `c1`))) as signed) - 9223372036854775807) div 1 % 1024); +set cte_max_recursion_depth=default; +--enable_result_log diff --git a/tests/integrationtest/t/executor/aggregate.test b/tests/integrationtest/t/executor/aggregate.test index f5b08430bdf3b..61b2368aba47c 100644 --- a/tests/integrationtest/t/executor/aggregate.test +++ b/tests/integrationtest/t/executor/aggregate.test @@ -833,3 +833,237 @@ select a from t group by ((+a)); -- error 1055 select a from t group by (-a); set sql_mode = default; + +# TestGroupConcatAggr +## issue #5411 +drop table if exists test; +create table test(id int, name int); +insert into test values(1, 10); +insert into test values(1, 20); +insert into test values(1, 30); +insert into test values(2, 20); +insert into test values(3, 200); +insert into test values(3, 500); +select id, group_concat(name) from test group by id order by id; +select id, group_concat(name SEPARATOR ';') from test group by id order by id; +select id, group_concat(name SEPARATOR ',') from test group by id order by id; +select id, group_concat(name SEPARATOR '%') from test group by id order by id; +select id, group_concat(name SEPARATOR '') from test group by id order by id; +select id, group_concat(name SEPARATOR '123') from test group by id order by id; +select group_concat(id ORDER BY name) from (select * from test order by id, name limit 2,2) t; +select group_concat(id ORDER BY name desc) from (select * from 
test order by id, name limit 2,2) t; +select group_concat(name ORDER BY id) from (select * from test order by id, name limit 2,2) t; +select group_concat(name ORDER BY id desc) from (select * from test order by id, name limit 2,2) t; +select group_concat(name ORDER BY name desc SEPARATOR '++') from test; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select group_concat(distinct name order by name desc) from test; +set session group_concat_max_len=4; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=5; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=6; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=7; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=8; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=9; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=10; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=11; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=12; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=13; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=14; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=15; +select group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=4; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=5; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=6; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=7; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=8; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=9; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=10; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=11; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=12; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session 
group_concat_max_len=13; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=14; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=15; +select group_concat(id ORDER BY name asc, id desc SEPARATOR '--') from test; +select @@warning_count; +set session group_concat_max_len=4; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=5; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=6; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=7; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=8; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=9; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=10; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=11; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=12; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=13; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=14; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=15; +select group_concat(distinct name order by name desc) from test; +select @@warning_count; +set session group_concat_max_len=default; + +## test varchar table +drop table if exists test2; +create table test2(id varchar(20), name varchar(20)); +insert into test2 select * from test; +select group_concat(id ORDER BY name) from (select * from test2 order by id, name limit 2,2) t; +select group_concat(id ORDER BY name desc) from (select * from test2 order by id, name limit 2,2) t; +select group_concat(name ORDER BY id) from (select * from test2 order by id, name limit 2,2) t; +select group_concat(name ORDER BY id desc) from (select * from test2 order by id, name limit 2,2) t; +select group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from test2; + +## test Position Expr +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 1 desc, id SEPARATOR '++') from test; +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 2 desc, name SEPARATOR '++') from test; +-- error 1054 +select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY 3 desc, name SEPARATOR '++') from test; + +## test Param Marker +prepare s1 from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY floor(id/?) desc, name SEPARATOR '++') from test"; +set @a=2; +execute s1 using @a; +prepare s1 from "select 1, 2, 3, 4, 5 , group_concat(name, id ORDER BY ? 
desc, name SEPARATOR '++') from test"; +set @a=2; +execute s1 using @a; +set @a=3; +-- error 1054 +execute s1 using @a; +set @a=3.0; +execute s1 using @a; + +## test partition table +drop table if exists ptest; +CREATE TABLE ptest (id int,name int) PARTITION BY RANGE ( id ) (PARTITION `p0` VALUES LESS THAN (2), PARTITION `p1` VALUES LESS THAN (11)); +insert into ptest select * from test; +set session tidb_opt_distinct_agg_push_down = 0; +set session tidb_opt_agg_push_down = 0; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +set session tidb_opt_distinct_agg_push_down = 0; +set session tidb_opt_agg_push_down = 1; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +set session tidb_opt_distinct_agg_push_down = 1; +set session tidb_opt_agg_push_down = 0; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +set session tidb_opt_distinct_agg_push_down = 1; +set session tidb_opt_agg_push_down = 1; +select /*+ agg_to_cop */ group_concat(name ORDER BY name desc SEPARATOR '++'), group_concat(id ORDER BY name desc, id asc SEPARATOR '--') from ptest; +select /*+ agg_to_cop */ group_concat(distinct name order by name desc) from ptest; +set session tidb_opt_distinct_agg_push_down = default; +set session tidb_opt_agg_push_down = default; + +## issue #9920 +select group_concat(123, null); + +## issue #23129 +drop table if exists t1; +create table t1(cid int, sname varchar(100)); +insert into t1 values(1, 'Bob'), (1, 'Alice'); +insert into t1 values(3, 'Ace'); +set @@group_concat_max_len=5; +select group_concat(sname order by sname) from t1 group by cid; +drop table if exists t1; +create table t1(c1 varchar(10)); +insert into t1 values('0123456789'); +insert into t1 values('12345'); +set @@group_concat_max_len=8; +select group_concat(c1 order by c1) from t1 group by c1; +set @@group_concat_max_len=default; + +# TestSelectDistinct +drop table if exists select_distinct_test; +create table select_distinct_test(id int not null default 1, name varchar(255), PRIMARY KEY(id)); +insert INTO select_distinct_test VALUES (1, "hello"); +insert into select_distinct_test values (2, "hello"); +begin; +select distinct name from select_distinct_test; +commit; + +# TestInjectProjBelowTopN +drop table if exists t; +create table t (i int); +insert into t values (1), (1), (1),(2),(3),(2),(3),(2),(3); +explain format = 'brief' select * from t order by i + 1; +select * from t order by i + 1; +explain format = 'brief' select * from t order by i + 1 limit 2; +select * from t order by i + 1 limit 2; +select i, i, i from t order by i + 1; + +# TestIssue12759HashAggCalledByApply +insert into mysql.opt_rule_blacklist value("decorrelate"); +drop table if exists test; +create table test (a int); +insert into test value(1); +select /*+ hash_agg() */ sum(a), (select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt; +explain format = 'brief' select /*+ hash_agg() */ sum(a), (select 
NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1),(select NULL from test where tt.a = test.a limit 1) from test tt; +delete from mysql.opt_rule_blacklist where name = "decorrelate"; +admin reload opt_rule_blacklist; + diff --git a/tests/integrationtest/t/executor/analyze.test b/tests/integrationtest/t/executor/analyze.test index 5aeb03207acdd..f4b0aa789ee56 100644 --- a/tests/integrationtest/t/executor/analyze.test +++ b/tests/integrationtest/t/executor/analyze.test @@ -829,3 +829,107 @@ create table t(a int, b int, primary key(a), index idx(b)) partition by range(a) -- error 1105 analyze incremental table t partition p0 index idx; +# TestClusterIndexAnalyze +drop table if exists t; +set tidb_enable_clustered_index=on; +create table t (a int, b int, c int, primary key(a, b)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +create table t (a varchar(255), b int, c float, primary key(c, a)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +create table t (a char(10), b decimal(5, 3), c int, primary key(a, c, b)); +insert into t values (0, 0, 0); +insert into t values (1, 1, 1); +insert into t values (2, 2, 2); +insert into t values (3, 3, 3); +insert into t values (4, 4, 4); +insert into t values (5, 5, 5); +insert into t values (6, 6, 6); +insert into t values (7, 7, 7); +insert into t values (8, 8, 8); +insert into t values (9, 9, 9); +analyze table t; +drop table t; +set tidb_enable_clustered_index=default; + +# TestAnlyzeIssue +set @@tidb_analyze_version = 1; +## Issue15993 +drop table if exists t0; +CREATE TABLE t0(c0 INT PRIMARY KEY); +ANALYZE TABLE t0 INDEX PRIMARY; +## Issue15751 +drop table if exists t0; +CREATE TABLE t0(c0 INT, c1 INT, PRIMARY KEY(c0, c1)); +INSERT INTO t0 VALUES (0, 0); +ANALYZE TABLE t0; +## Issue15752 +drop table if exists t0; +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (0); +CREATE INDEX i0 ON t0(c0); +ANALYZE TABLE t0 INDEX i0; +set @@tidb_analyze_version = default; + +# TestManualAnalyzeSkipColumnTypes +drop table if exists t; +create table t(a int, b int, c json, d text, e mediumtext, f blob, g mediumblob, index idx(d(10))); +set @@session.tidb_analyze_skip_column_types = 'json,blob,mediumblob,text,mediumtext'; +delete from mysql.analyze_jobs; +analyze table t; +select job_info from mysql.analyze_jobs where job_info like '%analyze table%'; +delete from mysql.analyze_jobs; +analyze table t columns a, e; +select job_info from mysql.analyze_jobs where job_info like '%analyze table%'; +set @@session.tidb_analyze_skip_column_types = default; + +# TestIssue34228 +DROP TABLE IF EXISTS Issue34228; +CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01")); +INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02'); +SET 
@@global.tidb_analyze_version = 1; +SET @@session.tidb_partition_prune_mode = 'static'; +ANALYZE TABLE Issue34228; +SET @@session.tidb_partition_prune_mode = 'dynamic'; +ANALYZE TABLE Issue34228; +--sorted_result +SELECT * FROM Issue34228; + +## Needs a second run to hit the issue +connect (conn1, localhost, root,, executor__analyze); +DROP TABLE IF EXISTS Issue34228; +CREATE TABLE Issue34228 (id bigint NOT NULL, dt datetime NOT NULL) PARTITION BY RANGE COLUMNS(dt) (PARTITION p202201 VALUES LESS THAN ("2022-02-01"), PARTITION p202202 VALUES LESS THAN ("2022-03-01")); +INSERT INTO Issue34228 VALUES (1, '2022-02-01 00:00:02'), (2, '2022-02-01 00:00:02'); +SET @@global.tidb_analyze_version = 1; +SET @@session.tidb_partition_prune_mode = 'static'; +ANALYZE TABLE Issue34228; +SET @@session.tidb_partition_prune_mode = 'dynamic'; +ANALYZE TABLE Issue34228; +--sorted_result +SELECT * FROM Issue34228; + +connection default; +disconnect conn1; +SET @@global.tidb_analyze_version = default; +SET @@session.tidb_partition_prune_mode = default; diff --git a/tests/integrationtest/t/executor/autoid.test b/tests/integrationtest/t/executor/autoid.test new file mode 100644 index 0000000000000..aa1651888cdb3 --- /dev/null +++ b/tests/integrationtest/t/executor/autoid.test @@ -0,0 +1,487 @@ +# TestAutoIncrementInsertMinMax +drop table if exists t0, t1, t2, t3, t4, t5, t6, t7, t8, t9; +create table t0 (a tinyint signed key auto_increment) ; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +-- error 1690 +insert into t0 values (); +select * from t0 order by a; +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) ; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +-- error 1690 +insert into t1 values (); +select * from t1 order by a; +drop table t1; +create table t2 (a smallint signed key auto_increment) ; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +-- error 1690 +insert into t2 values (); +select * from t2 order by a; +drop table t2; +create table t3 (a smallint unsigned key auto_increment) ; +insert into t3 values (0); +insert into t3 values (); +insert into t3 values (32767); +insert into t3 values (); +insert into t3 values (65535); +-- error 1690 +insert into t3 values (); +select * from t3 order by a; +drop table t3; +create table t4 (a mediumint signed key auto_increment) ; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +-- error 1690 +insert into t4 values (); +select * from t4 order by a; +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) ; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +-- error 1690 +insert into t5 values (); +select * from t5 order by a; +drop table t5; +create table t6 (a integer signed key auto_increment) ; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +-- error 1690 +insert into t6 values (); +select * from t6 order by a; +drop table t6; +create table t7 (a integer unsigned key auto_increment) ; +insert into t7 values (0); +insert into t7 
values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +-- error 1690 +insert into t7 values (); +select * from t7 order by a; +drop table t7; +create table t8 (a bigint signed key auto_increment) ; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +-- error 1467 +insert into t8 values (); +select * from t8 order by a; +drop table t8; +create table t9 (a bigint unsigned key auto_increment) ; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +drop table t9; +create table t0 (a tinyint signed key auto_increment) auto_id_cache 1; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +-- error 1690 +insert into t0 values (); +select * from t0 order by a; +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) auto_id_cache 1; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +-- error 1690 +insert into t1 values (); +select * from t1 order by a; +drop table t1; +create table t2 (a smallint signed key auto_increment) auto_id_cache 1; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +-- error 1690 +insert into t2 values (); +select * from t2 order by a; +drop table t2; +create table t3 (a smallint unsigned key auto_increment) auto_id_cache 1; +insert into t3 values (0); +insert into t3 values (); +insert into t3 values (32767); +insert into t3 values (); +insert into t3 values (65535); +-- error 1690 +insert into t3 values (); +select * from t3 order by a; +drop table t3; +create table t4 (a mediumint signed key auto_increment) auto_id_cache 1; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +-- error 1690 +insert into t4 values (); +select * from t4 order by a; +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) auto_id_cache 1; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +-- error 1690 +insert into t5 values (); +select * from t5 order by a; +drop table t5; +create table t6 (a integer signed key auto_increment) auto_id_cache 1; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +-- error 1690 +insert into t6 values (); +select * from t6 order by a; +drop table t6; +create table t7 (a integer unsigned key auto_increment) auto_id_cache 1; +insert into t7 values (0); +insert into t7 values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +-- error 1690 +insert into t7 values (); +select * from t7 order by a; +drop table t7; +create table t8 (a bigint signed key auto_increment) auto_id_cache 1; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +-- error 1105 +insert into t8 values (); +select * from t8 order by 
a; +drop table t8; +create table t9 (a bigint unsigned key auto_increment) auto_id_cache 1; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +drop table t9; +create table t0 (a tinyint signed key auto_increment) auto_id_cache 100; +insert into t0 values (-128); +insert into t0 values (); +insert into t0 values (0); +insert into t0 values (); +insert into t0 values (127); +-- error 1690 +insert into t0 values (); +select * from t0 order by a; +drop table t0; +create table t1 (a tinyint unsigned key auto_increment) auto_id_cache 100; +insert into t1 values (0); +insert into t1 values (); +insert into t1 values (127); +insert into t1 values (); +insert into t1 values (255); +-- error 1690 +insert into t1 values (); +select * from t1 order by a; +drop table t1; +create table t2 (a smallint signed key auto_increment) auto_id_cache 100; +insert into t2 values (-32768); +insert into t2 values (); +insert into t2 values (0); +insert into t2 values (); +insert into t2 values (32767); +-- error 1690 +insert into t2 values (); +select * from t2 order by a; +drop table t2; +create table t3 (a smallint unsigned key auto_increment) auto_id_cache 100; +insert into t3 values (0); +insert into t3 values (); +insert into t3 values (32767); +insert into t3 values (); +insert into t3 values (65535); +-- error 1690 +insert into t3 values (); +select * from t3 order by a; +drop table t3; +create table t4 (a mediumint signed key auto_increment) auto_id_cache 100; +insert into t4 values (-8388608); +insert into t4 values (); +insert into t4 values (0); +insert into t4 values (); +insert into t4 values (8388607); +-- error 1690 +insert into t4 values (); +select * from t4 order by a; +drop table t4; +create table t5 (a mediumint unsigned key auto_increment) auto_id_cache 100; +insert into t5 values (0); +insert into t5 values (); +insert into t5 values (8388607); +insert into t5 values (); +insert into t5 values (16777215); +-- error 1690 +insert into t5 values (); +select * from t5 order by a; +drop table t5; +create table t6 (a integer signed key auto_increment) auto_id_cache 100; +insert into t6 values (-2147483648); +insert into t6 values (); +insert into t6 values (0); +insert into t6 values (); +insert into t6 values (2147483647); +-- error 1690 +insert into t6 values (); +select * from t6 order by a; +drop table t6; +create table t7 (a integer unsigned key auto_increment) auto_id_cache 100; +insert into t7 values (0); +insert into t7 values (); +insert into t7 values (2147483647); +insert into t7 values (); +insert into t7 values (4294967295); +-- error 1690 +insert into t7 values (); +select * from t7 order by a; +drop table t7; +create table t8 (a bigint signed key auto_increment) auto_id_cache 100; +insert into t8 values (-9223372036854775808); +insert into t8 values (); +insert into t8 values (0); +insert into t8 values (); +insert into t8 values (9223372036854775807); +-- error 1467 +insert into t8 values (); +select * from t8 order by a; +drop table t8; +create table t9 (a bigint unsigned key auto_increment) auto_id_cache 100; +insert into t9 values (0); +insert into t9 values (); +insert into t9 values (9223372036854775807); +insert into t9 values (); +select * from t9 order by a; +drop table t9; +create table t10 (a integer key auto_increment) auto_id_cache 1; +-- error 1264 +insert into t10 values (2147483648); +-- error 1264 +insert into t10 values (-2147483649); + +# 
TestRenameTableForAutoIncrement +drop table if exists t1, t2, t3, t11, t22, t33; +create table t1 (id int key auto_increment); +insert into t1 values (); +rename table t1 to t11; +insert into t11 values (); +## TODO(tiancaiamao): fix the bug and uncomment here; renaming a table should not discard the cached AUTO_ID. +select * from t11; + +## auto_id_cache 1 uses another implementation and does not have this bug. +create table t2 (id int key auto_increment) auto_id_cache 1; +insert into t2 values (); +rename table t2 to t22; +insert into t22 values (); +select * from t22; + +create table t3 (id int key auto_increment) auto_id_cache 100; +insert into t3 values (); +rename table t3 to t33; +insert into t33 values (); +## TODO(tiancaiamao): fix the bug and uncomment here; renaming a table should not discard the cached AUTO_ID. +select * from t33; + +# TestAutoIDConstraint +# Remove the constraint that the auto id column must be defined as a key +# See https://github.com/pingcap/tidb/issues/40580 +drop table if exists t0; +create table t0 (id int auto_increment,k int,c char(120)) ; +drop table if exists t1; +create table t1 (id int auto_increment,k int,c char(120)) engine = MyISAM; +drop table if exists t2; +create table t2 (id int auto_increment,k int,c char(120)) engine = InnoDB; +drop table if exists t3; +create table t3 (id int auto_increment,k int,c char(120)) auto_id_cache 1; +drop table if exists t4; +create table t4 (id int auto_increment,k int,c char(120)) auto_id_cache 100; +drop table if exists t5; +create table t5 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) ; +drop table if exists t6; +create table t6 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) engine = MyISAM; +drop table if exists t7; +create table t7 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) engine = InnoDB; +drop table if exists t8; +create table t8 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) auto_id_cache 1; +drop table if exists t9; +create table t9 (id int auto_increment,k int,c char(120),PRIMARY KEY(k, id)) auto_id_cache 100; +drop table if exists t10; +create table t10 (id int auto_increment,k int,c char(120),key idx_1(id)) ; +drop table if exists t11; +create table t11 (id int auto_increment,k int,c char(120),key idx_1(id)) engine = MyISAM; +drop table if exists t12; +create table t12 (id int auto_increment,k int,c char(120),key idx_1(id)) engine = InnoDB; +drop table if exists t13; +create table t13 (id int auto_increment,k int,c char(120),key idx_1(id)) auto_id_cache 1; +drop table if exists t14; +create table t14 (id int auto_increment,k int,c char(120),key idx_1(id)) auto_id_cache 100; +drop table if exists t15; +create table t15 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) ; +drop table if exists t16; +create table t16 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) engine = MyISAM; +drop table if exists t17; +create table t17 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) engine = InnoDB; +drop table if exists t18; +create table t18 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) auto_id_cache 1; +drop table if exists t19; +create table t19 (id int auto_increment,k int,c char(120),PRIMARY KEY(`k`, `id`), key idx_1(id)) auto_id_cache 100; + +## Adding an auto id column via alter table is not supported, but cover it here to prevent regression +create table tt1 (id int); +-- error 8200 +alter table tt1 add column (c int auto_increment); + +## Cover case:
create table with auto id column as key, and remove it later +create table tt2 (id int, c int auto_increment, key c_idx(c)); +alter table tt2 drop index c_idx; + +# TestAlterTableAutoIDCache +drop table if exists t_473; +create table t_473 (id int key auto_increment); +insert into t_473 values (); +select * from t_473; +show table t_473 next_row_id; +alter table t_473 auto_id_cache = 100; +show table t_473 next_row_id; +insert into t_473 values (); +select * from t_473; +show table t_473 next_row_id; + +## Note that auto_id_cache=1 uses a different implementation; switching between the two is not allowed. +## TODO: relax this restriction and update the test case. +-- error 1105 +alter table t_473 auto_id_cache = 1; + +# TestAutoIDIncrementAndOffset There is a potential issue in MySQL: when the value of auto_increment_offset is greater +# than that of auto_increment_increment, the value of auto_increment_offset is ignored +# (https://dev.mysql.com/doc/refman/8.0/en/replication-options-master.html#sysvar_auto_increment_increment). +# This is a flaw in the implementation of MySQL and it does not exist in TiDB: with the settings below +# (offset 10, increment 5), TiDB honors the offset and generates 10, 15, 20. +drop table if exists io; +set auto_increment_offset = 10; +set auto_increment_increment = 5; + +create table io (a int key auto_increment); +insert into io values (null),(null),(null); +select * from io; +drop table io; +create table io (a int key auto_increment) AUTO_ID_CACHE 1; +insert into io values (null),(null),(null); +select * from io; +drop table io; + +## Test handle is PK. +create table io (a int key auto_increment); +set auto_increment_offset = 10; +set auto_increment_increment = 2; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 10; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +drop table io; +create table io (a int key auto_increment) AUTO_ID_CACHE 1; +set auto_increment_offset = 10; +set auto_increment_increment = 2; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 10; +insert into io values (),(),(); +select * from io; +delete from io; +set auto_increment_increment = 5; +insert into io values (),(),(); +select * from io; +drop table io; + +## Test handle is not PK.
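+## When the auto_increment column is not the handle, TiDB also allocates an implicit _tidb_rowid for each row, so the cases below check both the generated b values and the _tidb_rowid values.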
+set auto_increment_offset = 10; +set auto_increment_increment = 2; +create table io (a int, b int auto_increment, key(b)); +insert into io(b) values (null),(null),(null); +select b from io; +select _tidb_rowid from io; +delete from io; +set auto_increment_increment = 10; +insert into io(b) values (null),(null),(null); +select b from io; +select _tidb_rowid from io; +drop table io; +set auto_increment_offset = 10; +set auto_increment_increment = 2; +create table io (a int, b int auto_increment, key(b)) AUTO_ID_CACHE 1; +insert into io(b) values (null),(null),(null); +select b from io; +select _tidb_rowid from io; +delete from io; +set auto_increment_increment = 10; +insert into io(b) values (null),(null),(null); +select b from io; +select _tidb_rowid from io; +drop table io; + +set auto_increment_offset = -1; +show warnings; +set auto_increment_increment = -2; +show warnings; +show variables like 'auto_increment%'; + +set auto_increment_offset = 65536; +show warnings; +set auto_increment_increment = 65536; +show warnings; +show variables like 'auto_increment%'; + +set auto_increment_offset = default; +set auto_increment_increment = default; diff --git a/tests/integrationtest/t/executor/charset.test b/tests/integrationtest/t/executor/charset.test index a5c3a3abca482..c97caaf04db7e 100644 --- a/tests/integrationtest/t/executor/charset.test +++ b/tests/integrationtest/t/executor/charset.test @@ -60,3 +60,22 @@ insert into t values ('a', '中文'), ('中文', '中文'), ('一二三', '一 --sorted_result select * from t; +# TestForbidUnsupportedCollations +-- error 1273 +select 'a' collate utf8_roman_ci; +-- error 1273 +select cast('a' as char) collate utf8_roman_ci; +-- error 1273 +set names utf8 collate utf8_roman_ci; +-- error 1273 +set session collation_server = 'utf8_roman_ci'; +-- error 1273 +set session collation_database = 'utf8_roman_ci'; +-- error 1273 +set session collation_connection = 'utf8_roman_ci'; +-- error 1273 +set global collation_server = 'utf8_roman_ci'; +-- error 1273 +set global collation_database = 'utf8_roman_ci'; +-- error 1273 +set global collation_connection = 'utf8_roman_ci'; diff --git a/tests/integrationtest/t/executor/chunk_reuse.test b/tests/integrationtest/t/executor/chunk_reuse.test new file mode 100644 index 0000000000000..d75d1ba3cc742 --- /dev/null +++ b/tests/integrationtest/t/executor/chunk_reuse.test @@ -0,0 +1,132 @@ +# TestLongBlobReuse +drop table if exists t1, t2, t3, t4; +create table t1 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2)); +insert into t1 (id1,id2)values(1,1); +insert into t1 (id1,id2)values(2,2),(3,3); +create table t2 (id1 int ,id2 char(10) ,id3 text,id4 blob,id5 json,id6 varchar(1000),PRIMARY KEY (`id1`) clustered,key id2(id2)); +insert into t2 (id1,id2)values(1,1); +insert into t2 (id1,id2)values(2,2),(3,3); +explain format='brief' select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'; +--sorted_result +select t1.id1 from t1,t2 where t1.id2 > '1' and t2.id2 > '1'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id2 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id3 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id4 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id5 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; 
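+## @@last_sql_use_alloc reports whether the previous statement was able to reuse cached chunks; the queries above and below vary only the projected column types (char, text, blob, json, varchar of different lengths) to probe which of them keep chunk reuse enabled.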
+--sorted_result +select t1.id1,t1.id6 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id7 from t1,t2 where t1.id2 > '1' and t2.id2 > '1' ; +select @@last_sql_use_alloc; +explain format='brief' select t1.id1 from t1,t2 where t1.id2 > '1'and t1.id1 = t2.id1; +--sorted_result +select t1.id1 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1 ,t1.id3 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1 ,t1.id4 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1 ,t1.id5 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1 ,t1.id6 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1 ,t1.id7 from t1,t2 where t1.id2 > '1' and t1.id1 = t2.id1; +select @@last_sql_use_alloc; +explain format='brief' select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1; +--sorted_result +select t1.id1 from t1,t2 where t1.id1 = 1 and t2.id1 = 1; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id2 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id3 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id4 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id5 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id6 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id7 from t1,t2 where t1.id1 = 1 and t2.id1 = 1 ; +select @@last_sql_use_alloc; +explain format='brief' select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2' ; +--sorted_result +select t1.id1,t1.id6 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id3 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id4 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id5 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id7 ,t2.id6 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +--sorted_result +select t1.id1,t1.id6 ,t2.id3 from t1 join t2 on t1.id2 = '1' and t2.id2 = '2'; +select @@last_sql_use_alloc; +create table t3 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`) clustered,key id2(id2),key id8(id8)); +insert into t3 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3); +explain format='brief' select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7); +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id1 from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id3 from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id4 from t3 where id2 > '4' or id8 < 
7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id5 from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id6 from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id7 from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +set tidb_enable_clustered_index = OFF; +create table t4 (id1 int ,id2 char(10),id8 int ,id3 text,id4 blob,id5 json,id6 varchar(1000),id7 varchar(1001), PRIMARY KEY (`id1`),key id2(id2),key id8(id8,id2)); +insert into t4 (id1,id2,id8)values(1,1,1),(2,2,2),(3,3,3); +explain format='brief' select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4'); +--sorted_result +select id2 from t4 where id2 > '3' union (select id2 from t4 where id2 > '4'); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id3) from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select CHAR_LENGTH(id2) from t3 where id2 > '4' or id8 < 7); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and id3 is null); +select @@last_sql_use_alloc; +--sorted_result +select id1 from t3 where id2 > '3' or id8 < 10 union (select id2 from t3 where id2 > '4' or id8 < 7 and char_length(id3) > 0); +select @@last_sql_use_alloc; +set tidb_enable_clustered_index = default; + diff --git a/tests/integrationtest/t/executor/cluster_table.test b/tests/integrationtest/t/executor/cluster_table.test new file mode 100644 index 0000000000000..fb4ae84cad2cf --- /dev/null +++ b/tests/integrationtest/t/executor/cluster_table.test @@ -0,0 +1,56 @@ +# TestFunctionEncodeSQLDigest +drop table if exists test_func_encode_sql_digest; +create table test_func_encode_sql_digest(id int primary key, v int); +select tidb_encode_sql_digest("begin"); +select tidb_encode_sql_digest("select @@tidb_current_ts"); +select tidb_encode_sql_digest("select id, v from test_func_decode_sql_digests where id = 1 for update"); +select tidb_encode_sql_digest(null); +-- error 1582 +select tidb_encode_sql_digest(); +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1;'); +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 1 ;'); +select (select tidb_encode_sql_digest('select 1')) = tidb_encode_sql_digest('select 2 ;'); + +# TestFunctionDecodeSQLDigestsPrivilege +drop user if exists 'testuser'@'localhost'; +create user 'testuser'@'localhost'; + +connect (conn1, localhost, testuser,,); +connection conn1; +-- error 1227 +select tidb_decode_sql_digests('["aa"]'); +connection default; + +grant process on *.* to 'testuser'@'localhost'; +connection conn1; +select tidb_decode_sql_digests('["aa"]'); +connection default; +disconnect conn1; +drop user 'testuser'@'localhost'; + +# TestFunctionDecodeSQLDigests +set global tidb_enable_stmt_summary = 1; +select @@global.tidb_enable_stmt_summary; +drop table if exists test_func_decode_sql_digests; +create table test_func_decode_sql_digests(id int primary key, v int); +begin; +--disable_result_log +select @@tidb_current_ts; +--enable_result_log +select id, v from test_func_decode_sql_digests where id = 1 for update; +rollback; +select 
tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 0); +select tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5","58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44","7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9"]', 24); +select tidb_decode_sql_digests('[]'); +select tidb_decode_sql_digests(null); +select tidb_decode_sql_digests('["e6f07d43b5c21db0fbb9a31feac2dc599787763393dd5acbfad80e247eb02ad5",1,null,"58f3717da2d79c14773a1e3094aaddeff2b11747d3aef95741151af9acba9d44",{"a":1},[2],"7e9f826ed22a09940940a42d1aca47a75b3adc3c3fde252f4b912ac886194eb9","","abcde"]'); +select tidb_decode_sql_digests('{"a":1}'); +show warnings; +select tidb_decode_sql_digests('aabbccdd'); +show warnings; +-- error 1582 +select tidb_decode_sql_digests('a', 1, 2); +-- error 1582 +select tidb_decode_sql_digests(); +set global tidb_enable_stmt_summary = default; + diff --git a/tests/integrationtest/t/executor/compact_table.test b/tests/integrationtest/t/executor/compact_table.test new file mode 100644 index 0000000000000..19d3c6402904a --- /dev/null +++ b/tests/integrationtest/t/executor/compact_table.test @@ -0,0 +1,52 @@ +# TestCompactUnknownTable +drop table if exists bar, foo; +connect (conn1, localhost, root,,); +connection conn1; +-- error 1046 +alter table test compact tiflash replica; +-- error 1146 +alter table executor__compact_table.foo compact tiflash replica; +use executor__compact_table; +-- error 1146 +alter table bar compact; +connection default; +disconnect conn1; + +# TestCompactTableNoTiFlashReplica +drop table if exists t; +create table t(a int); +alter table t compact tiflash replica; +show warnings; +alter table executor__compact_table.t compact; +show warnings; +connect (conn1, localhost, root,, executor__compact_table); +connection conn1; +alter table executor__compact_table.t compact; +show warnings; +connection default; +disconnect conn1; + +# TestCompactTableNoPartition +drop table if exists t; +create table t(a int); +-- error 1105 +alter table t compact partition p1,p2 tiflash replica; + +# TestCompactTablePartitionInvalid +drop table if exists t; +CREATE TABLE t ( + id INT NOT NULL AUTO_INCREMENT PRIMARY KEY, + fname VARCHAR(25) NOT NULL, + lname VARCHAR(25) NOT NULL, + store_id INT NOT NULL, + department_id INT NOT NULL +) +PARTITION BY RANGE(id) ( + PARTITION p0 VALUES LESS THAN (5), + PARTITION p1 VALUES LESS THAN (10), + PARTITION p2 VALUES LESS THAN (15), + PARTITION p3 VALUES LESS THAN MAXVALUE +); +-- error 1735 +alter table t compact partition p1,p2,p4 tiflash replica; + diff --git a/tests/integrationtest/t/executor/cte.test b/tests/integrationtest/t/executor/cte.test new file mode 100644 index 0000000000000..25bdab72cdee4 --- /dev/null +++ b/tests/integrationtest/t/executor/cte.test @@ -0,0 +1,147 @@ +# TestBasicCTE +with recursive cte1 as (select 1 c1 union all select c1 + 1 c1 from cte1 where c1 < 5) select * from cte1; +with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 10) select * from cte1 order by c1; +with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < 3 union all select c1 + 2 c1 from cte1 where c1 < 5) select * from cte1 order by c1; +drop table if exists t1; +create table t1(a int); +insert into t1 values(1); 
+insert into t1 values(2); +SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0) SELECT * FROM qn WHERE b=a); +SELECT * FROM t1 dt WHERE EXISTS( WITH RECURSIVE qn AS (SELECT a*0 AS b UNION ALL SELECT b+1 FROM qn WHERE b=0 or b = 1) SELECT * FROM qn WHERE b=a ); +with recursive c(p) as (select 1), cte(a, b) as (select 1, 1 union select a+1, 1 from cte, c where a < 5) select * from cte order by 1, 2; + +# TestUnionDistinct +with recursive cte1(c1) as (select 1 union select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; +with recursive cte1(c1) as (select 1 union all select 1 union select 1 union all select c1 + 1 from cte1 where c1 < 3) select * from cte1 order by c1; +drop table if exists t1; +create table t1(c1 int, c2 int); +insert into t1 values(1, 1), (1, 2), (2, 2); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from t1) select * from cte1 order by c1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (1), (1), (2), (2), (2); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 4) select * from cte1 order by c1; + +# TestCTEMaxRecursionDepth +set @@cte_max_recursion_depth = -1; +-- error 3636 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 100) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +set @@cte_max_recursion_depth = 0; +-- error 3636 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; +-- error 3636 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +set @@cte_max_recursion_depth = 1; +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 0) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 1) select * from cte1; +-- error 3636 +with recursive cte1(c1) as (select 1 union select c1 + 1 c1 from cte1 where c1 < 2) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +with cte1(c1) as (select 1 union select 2) select * from cte1 order by c1; +set @@cte_max_recursion_depth = default; + +# TestCTEWithLimit +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 0) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 10) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 995) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 5 offset 6) select * from cte1; +set cte_max_recursion_depth=2; +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +-- error 3636 +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +set cte_max_recursion_depth=1000; +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 996) select * from cte1; +-- 
error 3636 +with recursive cte1(c1) as (select 0 union select c1 + 1 from cte1 limit 5 offset 997) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 0 offset 10) select * from cte1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 order by dt1.c1, dt2.c1; +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1 order by dt1.c1, dt1.c1; +# Different from MySQL; maybe it's a MySQL bug? (https://bugs.mysql.com/bug.php?id=103890&thanks=4) +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where c1 in (select 2); +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 dt where c1 in (select c1 from cte1 where 1 = dt.c1 - 1); +with recursive cte1(c1) as (select 1 union select c1 + 1 from cte1 limit 2 offset 1) select c1 from cte1 where cte1.c1 = (select dt1.c1 from cte1 dt1 where dt1.c1 = cte1.c1); +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (2), (3); +-- error 1221 +with recursive cte1(c1) as (select c1 from t1 limit 1 offset 1 union select c1 + 1 from cte1 limit 0 offset 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 1 offset 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 0 offset 1) select * from cte1; +with recursive cte1(c1) as (select 1 union select 2 order by 1 limit 2 offset 0) select * from cte1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), (33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128), (129), (130), (131), (132), (133), (134), (135), (136), (137), (138), (139), (140), (141), (142), (143), (144), (145), (146), (147), (148), (149), (150), (151), (152), (153), (154), (155), (156), (157), (158), (159), (160), (161), (162), (163), (164), (165), (166), (167), (168), (169), (170), (171), (172), (173), (174), (175), (176), (177), (178), (179), (180), (181), (182), (183), (184), (185), (186), (187), (188), (189), (190), (191), (192), (193), (194), (195), (196), (197), (198), (199), (200), (201), (202), (203), (204), (205), (206), (207), (208), (209), (210), (211), (212), (213), (214), (215), (216), (217), (218), (219), (220), (221), (222), (223), (224), (225), (226), (227), (228), (229), (230), (231), (232), (233), (234), (235), (236), (237), (238), (239), (240), (241), (242), (243), (244), (245), (246), (247), (248), (249), (250), (251), (252), (253), (254), (255), (256), (257), (258), (259), (260), (261),
(262), (263), (264), (265), (266), (267), (268), (269), (270), (271), (272), (273), (274), (275), (276), (277), (278), (279), (280), (281), (282), (283), (284), (285), (286), (287), (288), (289), (290), (291), (292), (293), (294), (295), (296), (297), (298), (299); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 1 offset 100) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 c1 from cte1 limit 5 offset 100) select * from cte1; +with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1; +with cte1 as (select c1 from t1 limit 2 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select 2 limit 0 offset 1) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +# with recursive cte1(c1) as (select c1 from t1 union select 2 limit 5 offset 100) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 limit 3 offset 100) select * from cte1 dt1 join cte1 dt2 on dt1.c1 = dt2.c1; +set cte_max_recursion_depth = 0; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(0); +with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 0) select * from cte1; +-- error 3636 +with recursive cte1 as (select 1/c1 c1 from t1 union select c1 + 1 c1 from cte1 where c1 < 2 limit 1) select * from cte1; +set cte_max_recursion_depth = 1000; +drop table if exists t1; +create table t1(c1 int); +insert into t1 values(1), (2), (3); +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 0 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 1 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 2 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 3 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union select c1 + 1 from cte1 limit 4 offset 4)
select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 2) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 3) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 0 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 1 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 2 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 3 offset 4) select * from cte1; +with recursive cte1(c1) as (select c1 from t1 union all select c1 + 1 from cte1 limit 4 offset 4) select * from cte1; +set cte_max_recursion_depth = default; + +# TestCTEsInView +# https://github.com/pingcap/tidb/issues/33965 +drop table if exists executor__cte.t; +drop view if exists executor__cte.v; +create database if not exists executor__cte1; +create table executor__cte.t (a int); +create table executor__cte1.t (a int); +insert into executor__cte.t values (1); +insert into executor__cte1.t values (2); +create definer='root'@'localhost' view executor__cte.v as with tt as (select * from t) select * from tt; +select * from executor__cte.v; +use executor__cte1; +select * from executor__cte.v; +use executor__cte; +drop database executor__cte1; diff --git a/tests/integrationtest/t/executor/executor_txn.test b/tests/integrationtest/t/executor/executor_txn.test new file mode 100644 index 0000000000000..557c6f1b041fc --- /dev/null +++ b/tests/integrationtest/t/executor/executor_txn.test @@ -0,0 +1,133 @@ +# TestRollbackToSavepoint +drop table if exists t; +create table t(id int, a int, unique index idx(id)); +begin pessimistic; +insert into t values (1,1); +savepoint s1; +insert into t values (2,2); +rollback to s1; +insert into t values (2,2); +select * from t; +rollback to s1; +select * from t; +commit; +select * from t; +delete from t; +insert into t values (1,1); +begin pessimistic; +delete from t where id = 1; +savepoint s1; +insert into t values (1,2); +rollback to s1; +select * from t; +commit; +select * from t; + +# TestSavepointRandTestIssue0 +drop table if exists t; +CREATE TABLE t (a enum('B','C') NOT NULL,UNIQUE KEY idx_1 (a),KEY idx_2 (a)); +begin pessimistic; +savepoint sp0; +insert ignore into t values ( 'B' ),( 'C' ); +-- error 1062 +update t set a = 'C' where a = 'B'; +select * from t where a = 'B' for update; +rollback to 
sp0; +delete from t where a = 'B' ; +rollback; + +# TestSavepointWithTemporaryTable +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=''; +drop table if exists tmp1; +create temporary table tmp1 (id int primary key auto_increment, u int unique, v int); +insert into tmp1 values(1, 11, 101); +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='optimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode='pessimistic'; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=''; +drop table if exists tmp1; +create global temporary table tmp1 (id int primary key auto_increment, u int unique, v int) on commit delete rows; +begin; +savepoint sp0; +insert into tmp1 values(2, 22, 202); +savepoint sp1; +insert into tmp1 values(3, 33, 303); +savepoint sp2; +insert into tmp1 values(4, 44, 404); +rollback to sp2; +select * from tmp1 order by id; +rollback to sp1; +select * from tmp1 order by id; +commit; +select * from tmp1 order by id; + +set session tidb_txn_mode=default; diff --git a/tests/integrationtest/t/executor/explain.test b/tests/integrationtest/t/executor/explain.test new file mode 100644 index 0000000000000..5d72e5d223a22 --- /dev/null +++ b/tests/integrationtest/t/executor/explain.test @@ -0,0 +1,164 @@ +# TestExplainCartesianJoin +drop table if exists t; +create table t (v int); +explain format = 'brief' select * from t t1, t t2; +explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v > t1.v); +explain format = 'brief' select * from t t1 where exists (select 1 from t t2 where t2.v in (t1.v+1, t1.v+2)); +explain format = 'brief' select * from t t1, t t2 where t1.v = t2.v; + +# TestExplainWrite +drop table if exists t; +create table t (a int); 
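+## explain analyze actually runs the statement and its output contains nondeterministic runtime statistics, so the result log is disabled around it and only the table contents are checked.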
+--disable_result_log +explain analyze insert into t select 1; +--enable_result_log +select * from t; +--disable_result_log +explain analyze update t set a=2 where a=1; +--enable_result_log +select * from t; +--disable_result_log +explain format = 'brief' insert into t select 1; +--enable_result_log +select * from t; +--disable_result_log +explain analyze insert into t select 1; +explain analyze replace into t values (3); +--enable_result_log +select * from t order by a; + +# TestExplainStatementsSummary +desc format='brief' select * from information_schema.statements_summary; +desc format='brief' select * from information_schema.statements_summary where digest is null; +desc format='brief' select * from information_schema.statements_summary where digest = 'abcdefg'; +desc format='brief' select * from information_schema.statements_summary where digest in ('a','b','c'); + +# TestFix29401 +drop table if exists tt123; +CREATE TABLE tt123 ( + id int(11) NOT NULL, + a bigint(20) DEFAULT NULL, + b char(20) DEFAULT NULL, + c datetime DEFAULT NULL, + d double DEFAULT NULL, + e json DEFAULT NULL, + f decimal(40,6) DEFAULT NULL, + PRIMARY KEY (id) /*T![clustered_index] CLUSTERED */, + KEY a (a), + KEY b (b), + KEY c (c), + KEY d (d), + KEY f (f) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; + explain format='brief' select /*+ inl_hash_join(t1) */ * from tt123 t1 join tt123 t2 on t1.b=t2.e; + +# TestIssue35105 +drop table if exists t; +create table t (a int primary key); +insert into t values (2); +set @@tidb_constraint_check_in_place=1; +-- error 1062 +explain analyze insert into t values (1), (2), (3); +select * from t; +set @@tidb_constraint_check_in_place=DEFAULT; + +# TestExplainFormatPlanCache +drop table if exists t; +create table t(a int); +set @@session.tidb_enable_non_prepared_plan_cache = 1; +select * from t limit 1; +select * from t limit 1; +explain format = 'plan_cache' select * from (select * from t) t1 limit 1; +show warnings; +explain format = 'plan_cache' select * from (select * from t) t1 limit 1; +select @@last_plan_from_cache; +--disable_result_log +explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1; +--enable_result_log +show warnings; +--disable_result_log +explain analyze format = 'plan_cache' select * from (select * from t) t1 limit 1; +--enable_result_log +select @@last_plan_from_cache; +explain format = 'plan_cache' select * from t; +show warnings; +explain format = 'plan_cache' select * from t; +select @@last_plan_from_cache; +--disable_result_log +explain analyze format = 'plan_cache' select * from t; +--enable_result_log +show warnings; +--disable_result_log +explain analyze format = 'plan_cache' select * from t; +--enable_result_log +select @@last_plan_from_cache; +explain select * from t; +select @@last_plan_from_cache; +explain format = 'brief' select * from t; +select @@last_plan_from_cache; +explain format = 'dot' select * from t; +select @@last_plan_from_cache; +explain format = 'hint' select * from t; +select @@last_plan_from_cache; +explain format = 'row' select * from t; +select @@last_plan_from_cache; +explain format = 'verbose' select * from t; +select @@last_plan_from_cache; +explain format = 'traditional' select * from t; +select @@last_plan_from_cache; +explain format = 'binary' select * from t; +select @@last_plan_from_cache; +explain format = 'tidb_json' select * from t; +select @@last_plan_from_cache; +explain format = 'cost_trace' select * from t; +select @@last_plan_from_cache; +set 
@@session.tidb_enable_non_prepared_plan_cache = DEFAULT; + +# TestExplainPrivileges +drop table if exists t; +drop view if exists v; +drop user if exists 'explain'@'%'; +create table t (id int); +create view v as select * from t; +create user 'explain'@'%'; +grant select on executor__explain.v to 'explain'@'%'; + +connect (conn1, localhost, explain,,); +show databases; +use executor__explain; +select * from v; +-- error 1345 +explain format = 'brief' select * from v; + +connection default; +grant show view on executor__explain.v to 'explain'@'%'; +connection conn1; +explain format = 'brief' select * from v; + +connection default; +revoke select on executor__explain.v from 'explain'@'%'; +connection conn1; +-- error 1142 +explain format = 'brief' select * from v; + +connection default; +create table t1 (i int); +create table t2 (j int); +create table t3 (k int, secret int); +create view v1 as select * from t1; +create view v2 as select * from v1, t2; +create view v3 as select k from t3; +grant select, show view on executor__explain.v2 to 'explain'@'%'; +grant show view on executor__explain.v1 to 'explain'@'%'; +grant select, show view on executor__explain.t3 to 'explain'@'%'; +grant select, show view on executor__explain.v3 to 'explain'@'%'; + +connection conn1; +-- error 1142 +explain select * from v1; +-- error 1345 +explain select * from v2; +explain select * from t3; +explain select * from v3; +disconnect conn1; + diff --git a/tests/integrationtest/t/executor/explainfor.test b/tests/integrationtest/t/executor/explainfor.test new file mode 100644 index 0000000000000..454976647e409 --- /dev/null +++ b/tests/integrationtest/t/executor/explainfor.test @@ -0,0 +1,432 @@ +# TestExplainMemTablePredicate +desc format='brief' select * from METRICS_SCHEMA.tidb_query_duration where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ; +desc format='brief' select * from METRICS_SCHEMA.up where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13' ; +desc format='brief' select * from information_schema.cluster_log where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +desc format='brief' select * from information_schema.cluster_log where level in ('warn','error') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +desc format='brief' select * from information_schema.cluster_log where type in ('high_cpu_1','high_memory_1') and time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +desc format='brief' select * from information_schema.slow_query; +desc format='brief' select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +set @@time_zone = '+00:00'; +desc format='brief' select * from information_schema.slow_query where time >= '2019-12-23 16:10:13' and time <= '2019-12-23 16:30:13'; +set @@time_zone = default; + +# TestExplainClusterTable +desc format='brief' select * from information_schema.cluster_config where type in ('tikv', 'tidb'); +desc format='brief' select * from information_schema.cluster_config where instance='192.168.1.7:2379'; +desc format='brief' select * from information_schema.cluster_config where type='tidb' and instance='192.168.1.7:2379'; + +# TestInspectionResultTable +desc format='brief' select * from information_schema.inspection_result where rule = 'ddl' and rule = 'config'; +desc format='brief' select * from information_schema.inspection_result where rule in ('ddl', 'config'); +desc format='brief' select * from information_schema.inspection_result where 
item in ('ddl.lease', 'raftstore.threadpool'); +desc format='brief' select * from information_schema.inspection_result where item in ('ddl.lease', 'raftstore.threadpool') and rule in ('ddl', 'config'); + +# TestInspectionRuleTable +desc format='brief' select * from information_schema.inspection_rules where type='inspection'; +desc format='brief' select * from information_schema.inspection_rules where type='inspection' or type='summary'; +desc format='brief' select * from information_schema.inspection_rules where type='inspection' and type='summary'; + +# TestSavedPlanPanicPlanCache +set tidb_enable_prepared_plan_cache=1; +drop table if exists t; +create table t(a int, b int, c int generated always as (a+b) stored); +insert into t(a,b) values(1,1); +begin; +update t set b = 2 where a = 1; +prepare stmt from 'select b from t where a > ?'; +set @p = 0; +execute stmt using @p; +set @p = 1; +execute stmt using @p; +-- error 3105 +insert into t(a,b,c) values(3,3,3); +rollback; +set tidb_enable_prepared_plan_cache=default; + +# TestExplainTableStorage +desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema'; +desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_NAME = 'schemata'; +desc format='brief' select * from information_schema.TABLE_STORAGE_STATS where TABLE_SCHEMA = 'information_schema' and TABLE_NAME = 'schemata'; + +# TestInspectionSummaryTable +desc format='brief' select * from information_schema.inspection_summary where rule='ddl'; +desc format='brief' select * from information_schema.inspection_summary where 'ddl'=rule or rule='config'; +desc format='brief' select * from information_schema.inspection_summary where 'ddl'=rule or rule='config' or rule='slow_query'; +desc format='brief' select * from information_schema.inspection_summary where (rule='config' or rule='slow_query') and (metrics_name='metric_name3' or metrics_name='metric_name1'); +desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query'); +desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name='metric_name1'; +desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'slow_query') and metrics_name in ('metric_name1', 'metric_name2'); +desc format='brief' select * from information_schema.inspection_summary where rule='ddl' and metrics_name in ('metric_name1', 'metric_name2'); +desc format='brief' select * from information_schema.inspection_summary where rule='ddl' and metrics_name='metric_NAME3'; +desc format='brief' select * from information_schema.inspection_summary where rule in ('ddl', 'config') and rule in ('slow_query', 'config'); +desc format='brief' select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and rule in ('ddl', 'config') and rule in ('slow_query', 'config') and quantile in (0.80, 0.90); +desc format='brief' select * from information_schema.inspection_summary where metrics_name in ('metric_name1', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name4') and metrics_name in ('metric_name5', 'metric_name1') and metrics_name in ('metric_name1', 'metric_name3'); + +# TestExplainTiFlashSystemTables +desc format='brief' select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '192.168.1.7:3930'; +desc format='brief' select * from 
information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '192.168.1.7:3930'; +desc format='brief' select * from information_schema.TIFLASH_TABLES where TIDB_DATABASE = 'test'; +desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIDB_DATABASE = 'test'; +desc format='brief' select * from information_schema.TIFLASH_TABLES where TIDB_TABLE = 't'; +desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIDB_TABLE = 't'; +desc format='brief' select * from information_schema.TIFLASH_TABLES where TIFLASH_INSTANCE = '192.168.1.7:3930' and TIDB_DATABASE = 'test' and TIDB_TABLE = 't'; +desc format='brief' select * from information_schema.TIFLASH_SEGMENTS where TIFLASH_INSTANCE = '192.168.1.7:3930' and TIDB_DATABASE = 'test' and TIDB_TABLE = 't'; + +# TestSetOperations4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists t1, t2; +CREATE TABLE `t1` (a int); +CREATE TABLE `t2` (a int); +insert into t1 values(1), (2); +insert into t2 values(1), (3); +prepare stmt from 'select * from t1 where a > ? union select * from t2 where a > ?;'; +set @a=0, @b=1; +--sorted_result +execute stmt using @a, @b; +--sorted_result +execute stmt using @b, @a; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @b, @b; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @a, @a; +select @@last_plan_from_cache; +prepare stmt from 'select * from t1 where a > ? union all select * from t2 where a > ?;'; +set @a=0, @b=1; +--sorted_result +execute stmt using @a, @b; +--sorted_result +execute stmt using @b, @a; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @b, @b; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @a, @a; +select @@last_plan_from_cache; +prepare stmt from 'select * from t1 where a > ? except select * from t2 where a > ?;'; +set @a=0, @b=1; +--sorted_result +execute stmt using @a, @a; +--sorted_result +execute stmt using @b, @a; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @b, @b; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @a, @b; +select @@last_plan_from_cache; +prepare stmt from 'select * from t1 where a > ? 
union select * from t2 where a > ?;'; +set @a=0, @b=1; +--sorted_result +execute stmt using @a, @a; +--sorted_result +execute stmt using @b, @a; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @b, @b; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @a, @b; +select @@last_plan_from_cache; +prepare stmt from 'select * from t1 union all select * from t1 intersect select * from t2;'; +--sorted_result +execute stmt; +prepare stmt from '(select * from t1 union all select * from t1) intersect select * from t2;'; +--sorted_result +execute stmt; +prepare stmt from '(select * from t1 union all select * from t1 intersect select * from t2) order by a limit 2;'; +--sorted_result +execute stmt; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestHint4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists t; +create table t(a int, index idx_a(a)); +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestInvisibleIndex4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists t; +CREATE TABLE t(c1 INT, index idx_c(c1)); +prepare stmt from 'select * from t use index(idx_c) where c1 > 1;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +ALTER TABLE t ALTER INDEX idx_c INVISIBLE; +-- error 1176 +select * from t use index(idx_c) where c1 > 1; +-- error 1176 +execute stmt; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestCTE4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +prepare stmt from 'with recursive cte1 as (select ? c1 union all select c1 + 1 c1 from cte1 where c1 < ?) select * from cte1;'; +set @a=5, @b=4, @c=2, @d=1; +--sorted_result +execute stmt using @d, @a; +--sorted_result +execute stmt using @d, @b; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @c, @b; +select @@last_plan_from_cache; +prepare stmt from 'with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < ?) select * from cte1 order by c1;'; +set @a=10, @b=2; +execute stmt using @a; +execute stmt using @b; +select @@last_plan_from_cache; +prepare stmt from 'with recursive cte1 as (select 1 c1 union all select 2 c1 union all select c1 + 1 c1 from cte1 where c1 < ? union all select c1 + ? c1 from cte1 where c1 < ?) select * from cte1 order by c1;'; +set @a=1, @b=2, @c=3, @d=4, @e=5; +--sorted_result +execute stmt using @c, @b, @e; +--sorted_result +execute stmt using @b, @a, @d; +select @@last_plan_from_cache; +drop table if exists t1; +create table t1(a int); +insert into t1 values(1); +insert into t1 values(2); +prepare stmt from 'SELECT * FROM t1 dt WHERE EXISTS(WITH RECURSIVE qn AS (SELECT a*? AS b UNION ALL SELECT b+? FROM qn WHERE b=?) 
SELECT * FROM qn WHERE b=a);'; +show warnings; +set @a=1, @b=2, @c=3, @d=4, @e=5, @f=0; +--sorted_result +execute stmt using @f, @a, @f; +--sorted_result +execute stmt using @a, @b, @a; +select @@last_plan_from_cache; +--sorted_result +execute stmt using @a, @b, @a; +prepare stmt from 'with recursive c(p) as (select ?), cte(a, b) as (select 1, 1 union select a+?, 1 from cte, c where a < ?) select * from cte order by 1, 2;'; +show warnings; +--sorted_result +execute stmt using @a, @a, @e; +--sorted_result +execute stmt using @b, @b, @c; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestValidity4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists t; +create table t(a int); +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +drop database if exists plan_cache; +create database plan_cache; +use plan_cache; +create table t(a int); +insert into t values(1); +execute stmt; +select @@last_plan_from_cache; +execute stmt; +select @@last_plan_from_cache; +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestListPartition4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +set @@session.tidb_enable_list_partition=1; +drop table if exists t; +create table t(a int, b int) PARTITION BY LIST (a) ( PARTITION p0 VALUES IN (1, 2, 3), PARTITION p1 VALUES IN (4, 5, 6)); +set @@tidb_partition_prune_mode='static'; +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; +set @@session.tidb_enable_list_partition=default; +set @@tidb_partition_prune_mode=default; + +# TestIssue28792 +drop table if exists t12, t97; +CREATE TABLE t12(a INT, b INT); +CREATE TABLE t97(a INT, b INT UNIQUE NOT NULL); +EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 on t12.b = t97.b; +EXPLAIN SELECT t12.a, t12.b FROM t12 LEFT JOIN t97 use index () on t12.b = t97.b; + +# TestMoreSessions4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists t; +create table t(a int); +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; + +connect (conn1, localhost, root,, executor__explainfor); +set tidb_enable_prepared_plan_cache=1; +-- error 8111 +execute stmt; +prepare stmt from 'select * from t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +connection default; + +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestSelectView4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop view if exists view1, view2, view3, view4; +drop table if exists view_t; +create table view_t (a int,b int); +insert into view_t values(1,2); +create definer='root'@'localhost' view view1 as select * from view_t; +create definer='root'@'localhost' view view2(c,d) as select * from view_t; +create definer='root'@'localhost' view view3(c,d) as select a,b from view_t; +create definer='root'@'localhost' view view4 as select * from 
(select * from (select * from view_t) tb1) tb; +prepare stmt1 from 'select * from view1;'; +execute stmt1; +execute stmt1; +select @@last_plan_from_cache; +prepare stmt2 from 'select * from view2;'; +execute stmt2; +execute stmt2; +select @@last_plan_from_cache; +prepare stmt3 from 'select * from view3;'; +execute stmt3; +execute stmt3; +select @@last_plan_from_cache; +prepare stmt4 from 'select * from view4;'; +execute stmt4; +execute stmt4; +select @@last_plan_from_cache; +drop table view_t; +create table view_t(c int,d int); +-- error 1356 +execute stmt1; +-- error 1356 +execute stmt2; +-- error 1356 +execute stmt3; +drop table view_t; +create table view_t(a int,b int,c int); +insert into view_t values(1,2,3); +execute stmt1; +select @@last_plan_from_cache; +execute stmt1; +select @@last_plan_from_cache; +execute stmt2; +select @@last_plan_from_cache; +execute stmt2; +select @@last_plan_from_cache; +execute stmt3; +select @@last_plan_from_cache; +execute stmt3; +select @@last_plan_from_cache; +execute stmt4; +select @@last_plan_from_cache; +execute stmt4; +select @@last_plan_from_cache; +alter table view_t drop column a; +alter table view_t add column a int after b; +update view_t set a=1; +execute stmt1; +select @@last_plan_from_cache; +execute stmt1; +select @@last_plan_from_cache; +execute stmt2; +select @@last_plan_from_cache; +execute stmt2; +select @@last_plan_from_cache; +execute stmt3; +select @@last_plan_from_cache; +execute stmt3; +select @@last_plan_from_cache; +execute stmt4; +select @@last_plan_from_cache; +execute stmt4; +select @@last_plan_from_cache; +drop table view_t; +drop view view1,view2,view3,view4; +set @@tidb_enable_window_function = 1; +drop table if exists t; +create table t(a int, b int); +insert into t values (1,1),(1,2),(2,1),(2,2); +create definer='root'@'localhost' view v as select a, first_value(a) over(rows between 1 preceding and 1 following), last_value(a) over(rows between 1 preceding and 1 following) from t; +prepare stmt from 'select * from v;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +drop view v; +set @@tidb_enable_window_function = default; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestIgnorePlanCacheWithPrepare +drop table if exists t; +create table t(a int, index idx_a(a)); +drop table if exists r; +create table r(a int); +prepare stmt from 'select * from t;'; +create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +execute stmt; +select @@last_plan_from_binding; +create binding for select * from t using select /*+ ignore_plan_cache() */ * from t; +execute stmt; +select @@last_plan_from_cache; +execute stmt; +select @@last_plan_from_binding; +create binding for select * from t using select /*+ use_index(t, idx_a) */ * from t; +execute stmt; +select @@last_plan_from_cache; +execute stmt; +select @@last_plan_from_binding; +prepare stmt_join from 'select * from t, r where r.a = t.a;'; +create binding for select * from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a; +execute stmt_join; +execute stmt_join; +select @@last_plan_from_cache; +execute stmt_join; +select @@last_plan_from_binding; +create binding for select * from t, r where r.a = t.a using select /*+ ignore_plan_cache() */* from t, r where r.a = t.a; +execute stmt_join; +select @@last_plan_from_cache; +execute stmt_join; +select @@last_plan_from_binding; +create binding for select 
* from t, r where r.a = t.a using select /*+ straight_join() */* from t, r where r.a = t.a;
+execute stmt_join;
+select @@last_plan_from_cache;
+execute stmt_join;
+select @@last_plan_from_binding;
+
diff --git a/tests/integrationtest/t/executor/grant.test b/tests/integrationtest/t/executor/grant.test
new file mode 100644
index 0000000000000..20c3709abbb21
--- /dev/null
+++ b/tests/integrationtest/t/executor/grant.test
@@ -0,0 +1,292 @@
+# TestWithGrantOption
+drop user if exists 'testWithGrant'@'localhost';
+CREATE USER 'testWithGrant'@'localhost' IDENTIFIED BY '123';
+SELECT * FROM mysql.db WHERE User="testWithGrant" and host="localhost";
+GRANT select ON executor__grant.* TO 'testWithGrant'@'localhost' WITH GRANT OPTION;
+SELECT grant_priv FROM mysql.DB WHERE User="testWithGrant" and host="localhost" and db="executor__grant";
+drop user if exists 'testWithGrant1';
+CREATE USER 'testWithGrant1';
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+GRANT ALL ON *.* TO 'testWithGrant1';
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+GRANT ALL ON *.* TO 'testWithGrant1' WITH GRANT OPTION;
+SELECT grant_priv FROM mysql.user WHERE User="testWithGrant1";
+
+# TestIssue2456
+drop user if exists 'dduser'@'%';
+drop DATABASE if exists `dddb_%`;
+CREATE USER 'dduser'@'%' IDENTIFIED by '123456';
+CREATE DATABASE `dddb_%`;
+CREATE table `dddb_%`.`te%` (id int);
+GRANT ALL PRIVILEGES ON `dddb_%`.* TO 'dduser'@'%';
+GRANT ALL PRIVILEGES ON `dddb_%`.`te%` to 'dduser'@'%';
+
+# TestNoAutoCreateUser
+DROP USER IF EXISTS 'test'@'%';
+SET sql_mode='NO_AUTO_CREATE_USER';
+-- error 1410
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx';
+set sql_mode=default;
+
+# TestCreateUserWhenGrant
+DROP USER IF EXISTS 'test'@'%';
+# This only applies when sql_mode NO_AUTO_CREATE_USER is off
+SET SQL_MODE='';
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%' IDENTIFIED BY 'xxx';
+# Make sure the user is created automatically when granting to a non-existent one.
+SELECT user FROM mysql.user WHERE user='test' and host='%';
+DROP USER IF EXISTS 'test'@'%';
+# Grant without a password.
+GRANT ALL PRIVILEGES ON *.* to 'test'@'%';
+# Make sure the user is created automatically when granting to a non-existent one.
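+# Since no IDENTIFIED BY clause was given, the row should carry an empty
+# authentication string and the default authentication plugin (assumed here
+# to be mysql_native_password, the TiDB default unless configured otherwise).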
+SELECT user, plugin FROM mysql.user WHERE user='test' and host='%'; +DROP USER IF EXISTS 'test'@'%'; +set sql_mode=default; + +# TestCreateUserWithTooLongName +-- error 1470 +CREATE USER '1234567890abcdefGHIKL1234567890abcdefGHIKL@localhost'; +-- error 1470 +CREATE USER 'some_user_name@host_1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890abcdefghij1234567890X'; + +# TestGrantPrivilegeAtomic +drop role if exists r1, r2, r3, r4; +create role r1, r2, r3; +create table executor__grant.testatomic(x int); +-- error 1410 +grant update, select, insert, delete on *.* to r1, r2, r4; +select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +grant update, select, insert, delete on *.* to r1, r2, r3; +-- error 1105 +revoke all on *.* from r1, r2, r4, r3; +select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.user where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +-- error 1410 +grant update, select, insert, delete on executor__grant.* to r1, r2, r4; +select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +grant update, select, insert, delete on executor__grant.* to r1, r2, r3; +-- error 1105 +revoke all on *.* from r1, r2, r4, r3; +select Update_priv, Select_priv, Insert_priv, Delete_priv from mysql.db where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +-- error 1410 +grant update, select, insert, delete on executor__grant.testatomic to r1, r2, r4; +select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +grant update, select, insert, delete on executor__grant.testatomic to r1, r2, r3; +-- error 1105 +revoke all on *.* from r1, r2, r4, r3; +select Table_priv from mysql.tables_priv where user in ('r1', 'r2', 'r3', 'r4') and host = "%"; +drop role if exists r1, r2, r3, r4; +drop table executor__grant.testatomic; + +# TestIssue2654 +DROP USER IF EXISTS 'test'@'%'; +CREATE USER 'test'@'%' IDENTIFIED BY 'test'; +GRANT SELECT ON executor__grant.* to 'test'; +SELECT user,host FROM mysql.user WHERE user='test' and host='%'; + +# TestGrantUnderANSIQuotes +SET SQL_MODE='ANSI_QUOTES'; +# Fix a bug that the GrantExec fails in ANSI_QUOTES sql mode +# The bug is caused by the improper usage of double quotes like: +# INSERT INTO mysql.user ... 
VALUES ("..", "..", "..") +GRANT ALL PRIVILEGES ON video_ulimit.* TO web@'%' IDENTIFIED BY 'eDrkrhZ>l2sV'; +REVOKE ALL PRIVILEGES ON video_ulimit.* FROM web@'%'; +DROP USER IF EXISTS 'web'@'%'; +set sql_mode=default; + +# TestMaintainRequire +DROP USER if exists 'ssl_auser'@'%'; +DROP USER if exists 'ssl_buser'@'%'; +DROP USER if exists 'ssl_cuser'@'%'; +DROP USER if exists 'ssl_duser'@'%'; +DROP USER if exists 'ssl_euser'@'%'; +DROP USER if exists 'ssl_fuser'@'%'; +DROP USER if exists 'ssl_guser'@'%'; +drop user if exists 'u1'@'%'; +drop user if exists 'u2'@'%'; +drop user if exists 'u3'@'%'; +CREATE USER 'ssl_auser'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'; +CREATE USER 'ssl_buser'@'%' require subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'; +CREATE USER 'ssl_cuser'@'%' require cipher 'AES128-GCM-SHA256'; +CREATE USER 'ssl_duser'@'%'; +CREATE USER 'ssl_euser'@'%' require none; +CREATE USER 'ssl_fuser'@'%' require ssl; +CREATE USER 'ssl_guser'@'%' require x509; +select * from mysql.global_priv where `user` like 'ssl_%'; +CREATE USER 'u1'@'%'; +GRANT ALL ON *.* TO 'u1'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' and subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH'; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'; +GRANT ALL ON *.* TO 'u1'@'%' require cipher 'AES128-GCM-SHA256'; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'; +GRANT select ON *.* TO 'u1'@'%'; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'; +GRANT ALL ON *.* TO 'u1'@'%' require none; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u1'; +CREATE USER 'u2'@'%'; +alter user 'u2'@'%' require ssl; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'; +alter user 'u2'@'%' require x509; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'; +alter user 'u2'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'; +alter user 'u2'@'%' require none; +select priv from mysql.global_priv where `Host` = '%' and `User` = 'u2'; +CREATE USER 'u3'@'%' require issuer '/CN=TiDB admin/OU=TiDB/O=PingCAP/L=San Francisco/ST=California/C=US' subject '/CN=tester1/OU=TiDB/O=PingCAP.Inc/L=Haidian/ST=Beijing/C=ZH' cipher 'AES128-GCM-SHA256'; +show create user 'u3'; +-- error 1105 +CREATE USER 'u4'@'%' require issuer 'CN=TiDB,OU=PingCAP'; +-- error 1105 +CREATE USER 'u5'@'%' require subject '/CN=TiDB\OU=PingCAP'; +-- error 1105 +CREATE USER 'u6'@'%' require subject '/CN=TiDB\NC=PingCAP'; +-- error 1105 +CREATE USER 'u7'@'%' require cipher 'AES128-GCM-SHA1'; +-- error 1105 +CREATE USER 'u8'@'%' require subject '/CN'; +-- error 1105 +CREATE USER 'u9'@'%' require cipher 'TLS_AES_256_GCM_SHA384' cipher 'RC4-SHA'; +-- error 1105 +CREATE USER 'u9'@'%' require issuer 'CN=TiDB,OU=PingCAP' issuer 'CN=TiDB,OU=PingCAP2'; +-- error 1105 +CREATE USER 'u9'@'%' require subject '/CN=TiDB\OU=PingCAP' subject '/CN=TiDB\OU=PingCAP2'; +-- error 1064 +CREATE USER 'u9'@'%' require ssl ssl; +-- error 1064 +CREATE USER 'u9'@'%' require x509 x509; + +# TestMaintainAuthString +drop user if 
exists 'maint_auth_str1'@'%'; +CREATE USER 'maint_auth_str1'@'%' IDENTIFIED BY 'foo'; +SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'; +ALTER USER 'maint_auth_str1'@'%' REQUIRE SSL; +SELECT authentication_string FROM mysql.user WHERE `Host` = '%' and `User` = 'maint_auth_str1'; + +# TestIssue22721 +drop table if exists xx; +drop user if exists 'sync_ci_data'@'%'; +create table xx (id int); +CREATE USER 'sync_ci_data'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU'; +GRANT USAGE ON *.* TO 'sync_ci_data'@'%'; +GRANT USAGE ON sync_ci_data.* TO 'sync_ci_data'@'%'; +GRANT USAGE ON executor__grant.* TO 'sync_ci_data'@'%'; +GRANT USAGE ON executor__grant.xx TO 'sync_ci_data'@'%'; + +# TestPerformanceSchemaPrivGrant +drop user if exists issue27867; +create user issue27867; +-- error 1044 +grant all on performance_schema.* to issue27867; +-- error 1044 +grant all on PERFormanCE_scHemA.* to issue27867; +grant select on performance_schema.* to issue27867; +-- error 1044 +grant insert on performance_schema.* to issue27867; +-- error 1044 +grant update on performance_schema.* to issue27867; +-- error 1044 +grant delete on performance_schema.* to issue27867; +-- error 1044 +grant drop on performance_schema.* to issue27867; +-- error 1044 +grant lock tables on performance_schema.* to issue27867; +-- error 1044 +grant create on performance_schema.* to issue27867; +-- error 1044 +grant references on performance_schema.* to issue27867; +-- error 1044 +grant alter on PERFormAnCE_scHemA.* to issue27867; +-- error 1044 +grant execute on performance_schema.* to issue27867; +-- error 1044 +grant index on PERFormanCE_scHemA.* to issue27867; +-- error 1044 +grant create view on performance_schema.* to issue27867; +-- error 1044 +grant show view on performance_schema.* to issue27867; +drop user issue27867; + +# TestGrantDynamicPrivs +drop user if exists dyn; +create user dyn; +-- error 3619 +GRANT BACKUP_ADMIN ON executor__grant.* TO dyn; +-- error 3929 +GRANT BOGUS_GRANT ON *.* TO dyn; +GRANT BACKUP_Admin ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +GRANT SYSTEM_VARIABLES_ADMIN, BACKUP_ADMIN ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +GRANT ROLE_ADMIN, BACKUP_ADMIN ON *.* TO dyn WITH GRANT OPTION; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +GRANT SYSTEM_VARIABLES_ADMIN, Select, ROLE_ADMIN ON *.* TO dyn; +SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn'; +SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='SYSTEM_VARIABLES_ADMIN'; +GRANT CONNECTION_ADMIN, Insert ON *.* TO dyn WITH GRANT OPTION; +SELECT Grant_Priv FROM mysql.user WHERE `Host` = '%' AND `User` = 'dyn'; +SELECT WITH_GRANT_OPTION FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' AND Priv='CONNECTION_ADMIN'; + +# TestNonExistTableIllegalGrant +drop user if exists u29302; +create user u29302; +-- error 1144 +grant create temporary tables on NotExistsD29302.NotExistsT29302 to u29302; +-- error 1144 +grant lock tables on executor__grant.NotExistsT29302 to u29302; +-- error 1221 +grant create temporary tables (NotExistsCol) on NotExistsD29302.NotExistsT29302 to u29302; +drop user u29302; + +# TestIssue34610 +drop table if exists t1; +drop user if exists user_1@localhost; +CREATE USER 
user_1@localhost; +CREATE TABLE T1(f1 INT); +-- error 1050 +CREATE TABLE t1(f1 INT); +GRANT SELECT ON T1 to user_1@localhost; +GRANT SELECT ON t1 to user_1@localhost; + +# TestIssue38293 +DROP USER IF EXISTS test; +CREATE USER test; +GRANT SELECT ON `mysql`.`db` TO test; +SELECT `Grantor` FROM `mysql`.`tables_priv` WHERE User = 'test'; + +# TestGrantOnNonExistTable +drop user if exists genius; +drop user if exists u29268; +create user genius; +-- error 1146 +select * from nonexist; +-- error 1146 +grant Select,Insert on nonexist to 'genius'; +create table if not exists xx (id int); +grant Select,Insert on XX to 'genius'; +grant Select,Insert on xx to 'genius'; +grant Select,Update on executor__grant.xx to 'genius'; +CREATE DATABASE d29268; +USE d29268; +CREATE USER u29268; +-- error 1146 +GRANT SELECT ON t29268 TO u29268; +-- error 1146 +GRANT DROP, INSERT ON t29268 TO u29268; +-- error 1146 +GRANT UPDATE, CREATE VIEW, SHOW VIEW ON t29268 TO u29268; +-- error 1146 +GRANT DELETE, REFERENCES, ALTER ON t29268 TO u29268; +GRANT CREATE ON t29268 TO u29268; +GRANT CREATE, SELECT ON t29268 TO u29268; +GRANT CREATE, DROP, INSERT ON t29268 TO u29268; + +connect (conn1, localhost, u29268,, d29268); +CREATE TABLE t29268 (c1 int); +INSERT INTO t29268 VALUES (1), (2); +SELECT c1 FROM t29268; +DROP TABLE t29268; +disconnect conn1; + +GRANT ALL ON t29268 TO u29268; +DROP USER u29268; +DROP DATABASE IF EXISTS d29268; +use executor__grant; + diff --git a/tests/integrationtest/t/executor/import_into.test b/tests/integrationtest/t/executor/import_into.test new file mode 100644 index 0000000000000..6c1dec4ae4e21 --- /dev/null +++ b/tests/integrationtest/t/executor/import_into.test @@ -0,0 +1,174 @@ +# TestImportIntoExplicitTransaction +drop table if exists t; +create table t (id int); +BEGIN; +-- error 1105 +IMPORT INTO t FROM '/file.csv'; +commit; + +# TestImportIntoOptionsNegativeCase +drop table if exists t; +create table t (id int); +-- error 8163 +import into t from '/file.csv' with xx=1; +-- error 8164 +import into t from '/file.csv' with detached=1; +-- error 8164 +import into t from '/file.csv' with character_set; +-- error 8165 +import into t from '/file.csv' with detached, detached; +-- error 8164 +import into t from '/file.csv' with character_set=true; +-- error 8164 +import into t from '/file.csv' with character_set=null; +-- error 8164 +import into t from '/file.csv' with character_set=1; +-- error 8164 +import into t from '/file.csv' with character_set=true; +-- error 8164 +import into t from '/file.csv' with character_set=''; +-- error 8164 +import into t from '/file.csv' with character_set='aa'; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=null; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=1; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=true; +-- error 8164 +import into t from '/file.csv' with fields_terminated_by=''; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=null; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by='aa'; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=1; +-- error 8164 +import into t from '/file.csv' with fields_enclosed_by=true; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by=null; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by='aa'; +-- error 8164 +import into t from '/file.csv' with fields_escaped_by=1; +-- error 8164 +import into t from '/file.csv' with 
fields_escaped_by=true; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=null; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=1; +-- error 8164 +import into t from '/file.csv' with fields_defined_null_by=true; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=null; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=1; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=true; +-- error 8164 +import into t from '/file.csv' with lines_terminated_by=''; +-- error 8164 +import into t from '/file.csv' with skip_rows=null; +-- error 8164 +import into t from '/file.csv' with skip_rows=''; +-- error 8164 +import into t from '/file.csv' with skip_rows=-1; +-- error 8164 +import into t from '/file.csv' with skip_rows=true; +-- error 8164 +import into t from '/file.csv' with split_file='aa'; +-- error 8164 +import into t from '/file.csv' with split_file, skip_rows=2; +-- error 8164 +import into t from '/file.csv' with disk_quota='aa'; +-- error 8164 +import into t from '/file.csv' with disk_quota='220MiBxxx'; +-- error 8164 +import into t from '/file.csv' with disk_quota=1; +-- error 8164 +import into t from '/file.csv' with disk_quota=false; +-- error 8164 +import into t from '/file.csv' with disk_quota=null; +-- error 8164 +import into t from '/file.csv' with thread='aa'; +-- error 8164 +import into t from '/file.csv' with thread=0; +-- error 8164 +import into t from '/file.csv' with thread=false; +-- error 8164 +import into t from '/file.csv' with thread=-100; +-- error 8164 +import into t from '/file.csv' with thread=null; +-- error 8164 +import into t from '/file.csv' with max_write_speed='aa'; +-- error 8164 +import into t from '/file.csv' with max_write_speed='11aa'; +-- error 8164 +import into t from '/file.csv' with max_write_speed=null; +-- error 8164 +import into t from '/file.csv' with max_write_speed=-1; +-- error 8164 +import into t from '/file.csv' with max_write_speed=false; +-- error 8164 +import into t from '/file.csv' with checksum_table=''; +-- error 8164 +import into t from '/file.csv' with checksum_table=123; +-- error 8164 +import into t from '/file.csv' with checksum_table=false; +-- error 8164 +import into t from '/file.csv' with checksum_table=null; +-- error 8164 +import into t from '/file.csv' with record_errors='aa'; +-- error 8164 +import into t from '/file.csv' with record_errors='111aa'; +-- error 8164 +import into t from '/file.csv' with record_errors=-123; +-- error 8164 +import into t from '/file.csv' with record_errors=null; +-- error 8164 +import into t from '/file.csv' with record_errors=true; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri=123; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri=':'; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri='sdsd'; +-- error 8164 +import into t from '/file.csv' with cloud_storage_uri='http://sdsd'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with character_set='utf8'; +-- error 8166 +import into t from '/file.csv' format 'sql' with character_set='utf8'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_enclosed_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_enclosed_by='a'; +-- error 
8166 +import into t from '/file.csv' format 'parquet' with fields_escaped_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_escaped_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with fields_defined_null_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with fields_defined_null_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with lines_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'sql' with lines_terminated_by='a'; +-- error 8166 +import into t from '/file.csv' format 'parquet' with skip_rows=1; +-- error 8166 +import into t from '/file.csv' format 'sql' with skip_rows=1; +-- error 8166 +import into t from '/file.csv' format 'parquet' with split_file; +-- error 8166 +import into t from '/file.csv' format 'sql' with split_file; +-- error 8156 +import into t from ''; +-- error 8157 +import into t from '/a.csv' format 'xx'; + diff --git a/tests/integrationtest/t/executor/index_advise.test b/tests/integrationtest/t/executor/index_advise.test new file mode 100644 index 0000000000000..17f11ae545ac9 --- /dev/null +++ b/tests/integrationtest/t/executor/index_advise.test @@ -0,0 +1,166 @@ +# TestIndexJoinProjPattern +set @@session.tidb_opt_advanced_join_hint=0; +drop table if exists t1, t2; +create table t1( + pnbrn_cnaps varchar(5) not null, + new_accno varchar(18) not null, + primary key(pnbrn_cnaps,new_accno) nonclustered +); +create table t2( + pnbrn_cnaps varchar(5) not null, + txn_accno varchar(18) not null, + txn_dt date not null, + yn_frz varchar(1) default null +); +insert into t1(pnbrn_cnaps,new_accno) values ("40001","123"); +insert into t2(pnbrn_cnaps, txn_accno, txn_dt, yn_frz) values ("40001","123","20221201","0"); +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +update +/*+ inl_join(a) */ +t2 b, +( + select t1.pnbrn_cnaps, + t1.new_accno + from t1 + where t1.pnbrn_cnaps = '40001' +) a +set b.yn_frz = '1' +where b.txn_dt = str_to_date('20221201', '%Y%m%d') +and b.pnbrn_cnaps = a.pnbrn_cnaps +and b.txn_accno = a.new_accno; +select yn_frz from t2; + +set @@session.tidb_opt_advanced_join_hint=default; +set @@session.tidb_enable_inl_join_inner_multi_pattern=default; + +# TestIndexJoinSelPattern +set @@tidb_opt_advanced_join_hint=0; +drop table if exists tbl_miss, tbl_src; +create table tbl_miss( + id bigint(20) unsigned not null, + txn_dt date default null, + perip_sys_uuid varchar(32) not null, + rvrs_idr varchar(1) not null, + primary key(id) clustered, + key idx1 (txn_dt, perip_sys_uuid, rvrs_idr) +); +insert into tbl_miss (id,txn_dt,perip_sys_uuid,rvrs_idr) values (1,"20221201","123","1"); +create table tbl_src( + txn_dt date default null, + uuid varchar(32) not null, + rvrs_idr char(1), + expd_inf varchar(5000), + primary key(uuid,rvrs_idr) nonclustered 
+); +insert into tbl_src (txn_dt,uuid,rvrs_idr) values ("20221201","123","1"); + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +explain format='brief' select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='ON'; +explain format='brief' select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; +select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set @@session.tidb_enable_inl_join_inner_multi_pattern='OFF'; +select /*+ use_index(mis,) inl_join(src) */ + * +from tbl_miss mis + ,tbl_src src +where src.txn_dt >= str_to_date('20221201', '%Y%m%d') +and mis.id between 1 and 10000 +and mis.perip_sys_uuid = src.uuid +and mis.rvrs_idr = src.rvrs_idr +and mis.txn_dt = src.txn_dt +and ( + case when isnull(src.expd_inf) = 1 then '' + else + substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, + instr(substr(concat_ws('',src.expd_inf,'~~'), + instr(concat_ws('',src.expd_inf,'~~'),'~~a4') + 4, length(concat_ws('',src.expd_inf,'~~'))),'~~') -1) + end +) != '01'; + +set @@tidb_opt_advanced_join_hint=default; +set @@session.tidb_enable_inl_join_inner_multi_pattern=default; diff --git a/tests/integrationtest/t/executor/index_lookup_merge_join.test b/tests/integrationtest/t/executor/index_lookup_merge_join.test new file mode 100644 index 0000000000000..13b6a97f99133 --- /dev/null +++ b/tests/integrationtest/t/executor/index_lookup_merge_join.test @@ -0,0 +1,108 @@ +# TestIssue28052 +drop table if exists t; +CREATE TABLE `t` (`col_tinyint_key_signed` tinyint(4) DEFAULT NULL,`col_year_key_signed` year(4) DEFAULT NULL,KEY `col_tinyint_key_signed` (`col_tinyint_key_signed`),KEY `col_year_key_signed` (`col_year_key_signed`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(-100,NULL); +select /*+ inl_merge_join(t1, t2) */ count(*) from t t1 right join t t2 on t1. `col_year_key_signed` = t2. 
`col_tinyint_key_signed`; + +# TestIssue18631 +drop table if exists t1, t2; +create table t1(a int, b int, c int, d int, primary key(a,b,c)); +create table t2(a int, b int, c int, d int, primary key(a,b,c)); +insert into t1 values(1,1,1,1),(2,2,2,2),(3,3,3,3); +insert into t2 values(1,1,1,1),(2,2,2,2); +explain format = 'brief' select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; +select /*+ inl_merge_join(t1,t2) */ * from t1 left join t2 on t1.a = t2.a and t1.c = t2.c and t1.b = t2.b order by t1.a desc; + +# TestIssue19408 +drop table if exists t1, t2; +create table t1 (c_int int, primary key(c_int)); +create table t2 (c_int int, unique key (c_int)) partition by hash (c_int) partitions 4; +insert into t1 values (1), (2), (3), (4), (5); +insert into t2 select * from t1; +begin; +delete from t1 where c_int = 1; +--sorted_result +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +--sorted_result +select /*+ INL_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +--sorted_result +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1, t2 where t1.c_int = t2.c_int; +commit; + +# TestIssue20137 +drop table if exists t1, t2; +create table t1 (id bigint(20) unsigned, primary key(id)); +create table t2 (id bigint(20) unsigned); +insert into t1 values (8738875760185212610); +insert into t1 values (9814441339970117597); +insert into t2 values (8738875760185212610); +insert into t2 values (9814441339970117597); +select /*+ INL_MERGE_JOIN(t1, t2) */ * from t2 left join t1 on t1.id = t2.id order by t1.id; + +# TestIndexJoinOnSinglePartitionTable +set @@tidb_opt_advanced_join_hint=0; +set @@tidb_partition_prune_mode= 'static'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +show warnings; +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +set @@tidb_partition_prune_mode= 'dynamic'; +drop table if exists t1, t2; +create table t1 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +create table t2 (c_int int, c_str varchar(40), primary key (c_int) ) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue ); +insert into t1 values (1, 'Alice'); +insert into t2 values (1, 'Bob'); +analyze table t1, t2; +select /*+ INL_MERGE_JOIN(t1,t2) */ 
* from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_MERGE_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +show warnings; +select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +explain format = 'brief' select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 partition(p0) on t1.c_int = t2.c_int and t1.c_str < t2.c_str; +set @@tidb_opt_advanced_join_hint=DEFAULT; +set @@tidb_partition_prune_mode= DEFAULT; + +# TestIssue20400 +drop table if exists t, s; +create table s(a int, index(a)); +create table t(a int); +insert into t values(1); +select /*+ hash_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; +select /*+ inl_merge_join(t,s)*/ * from t left join s on t.a=s.a and t.a>1; + +# TestIssue20549 +drop table if exists t1, t2; +CREATE TABLE `t1` (`id` bigint(20) NOT NULL AUTO_INCREMENT, `t2id` bigint(20) DEFAULT NULL, PRIMARY KEY (`id`), KEY `t2id` (`t2id`)); +INSERT INTO `t1` VALUES (1,NULL); +CREATE TABLE `t2` (`id` bigint(20) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id`)); +SELECT /*+ INL_MERGE_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; +SELECT /*+ HASH_JOIN(t1,t2) */ 1 from t1 left outer join t2 on t1.t2id=t2.id; + +# TestIssue24473 +drop table if exists x; +CREATE TABLE `x` ( `a` enum('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +--sorted_result +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +--sorted_result +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; + +# TestIssue25669 +drop table if exists x; +CREATE TABLE `x` ( `a` set('y','b','1','x','0','null') DEFAULT NULL, KEY `a` (`a`)); +insert into x values("x"),("x"),("b"),("y"); +--sorted_result +SELECT /*+ merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; +--sorted_result +SELECT /*+ inl_merge_join (t2,t3) */ t2.a,t3.a FROM x t2 inner join x t3 on t2.a = t3.a; + diff --git a/tests/integrationtest/t/executor/infoschema_reader.test b/tests/integrationtest/t/executor/infoschema_reader.test new file mode 100644 index 0000000000000..b25bb80e93da6 --- /dev/null +++ b/tests/integrationtest/t/executor/infoschema_reader.test @@ -0,0 +1,241 @@ +# TestProfiling +select * from information_schema.profiling; +set @@profiling=1; +select * from information_schema.profiling; + +# TestSchemataTables +select * from information_schema.SCHEMATA where schema_name='mysql'; +drop user if exists schemata_tester; +create user schemata_tester; + +connect (conn1, localhost, schemata_tester,, information_schema); +select count(*) from information_schema.SCHEMATA; +select * from information_schema.SCHEMATA where schema_name='mysql'; +select * from information_schema.SCHEMATA where schema_name='INFORMATION_SCHEMA'; + +connection default; +CREATE ROLE r_mysql_priv; +GRANT ALL PRIVILEGES ON mysql.* TO r_mysql_priv; +GRANT r_mysql_priv TO schemata_tester; + +connection conn1; +set role r_mysql_priv; +select count(*) from information_schema.SCHEMATA; +select * from information_schema.SCHEMATA; + +connection default; +disconnect conn1; + +# TestTableIDAndIndexID +drop table if 
exists executor__infoschema_reader.t; +create table executor__infoschema_reader.t (a int, b int, primary key(a), key k1(b)); +select index_id from information_schema.tidb_indexes where table_schema = 'executor__infoschema_reader' and table_name = 't'; +select tidb_table_id > 0 from information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't'; + +# TestSchemataCharacterSet +drop database if exists `foo`; +CREATE DATABASE `foo` DEFAULT CHARACTER SET = 'utf8mb4'; +select default_character_set_name, default_collation_name FROM information_schema.SCHEMATA WHERE schema_name = 'foo'; +drop database `foo`; + +# TestViews +drop view if exists executor__infoschema_reader.v1; +CREATE DEFINER='root'@'localhost' VIEW executor__infoschema_reader.v1 AS SELECT 1; +select TABLE_COLLATION is null from INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='VIEW'; +SELECT * FROM information_schema.views WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; +SELECT table_catalog, table_schema, table_name, table_type, engine, version, row_format, table_rows, avg_row_length, data_length, max_data_length, index_length, data_free, auto_increment, update_time, check_time, table_collation, checksum, create_options, table_comment FROM information_schema.tables WHERE table_schema='executor__infoschema_reader' AND table_name='v1'; + +# TestColumnsTables +drop table if exists t; +create table t (bit bit(10) DEFAULT b'100'); +SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = 'executor__infoschema_reader' AND TABLE_NAME = 't'; +drop table if exists t; +set time_zone='+08:00'; +drop table if exists t; +create table t (b timestamp(3) NOT NULL DEFAULT '1970-01-01 08:00:01.000'); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +set time_zone='+04:00'; +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +set time_zone=default; +drop table if exists t; +create table t (a bit DEFAULT (rand())); +select column_default from information_schema.columns where TABLE_NAME='t' and TABLE_SCHEMA='executor__infoschema_reader'; +drop table if exists t; +CREATE TABLE t (`COL3` bit(1) NOT NULL,b year) ; +select column_type from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 't'; +## For issue: https://github.com/pingcap/tidb/issues/43379 +select ordinal_position from information_schema.columns where table_schema=database() and table_name='t' and column_name='b'; + +# TestEngines +select * from information_schema.ENGINES; + +# TestDataTypesMaxLengthAndOctLength +# https://github.com/pingcap/tidb/issues/25467 +drop table if exists t; +create table t (a varchar(255) collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a varchar(255) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a varchar(255) collate utf8_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a char(10) collate ascii_bin); +select character_maximum_length, character_octet_length from 
information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a char(10) collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a set('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a set('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a enum('a', 'b', 'cccc') collate ascii_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; +create table t (a enum('a', 'b', 'cccc') collate utf8mb4_bin); +select character_maximum_length, character_octet_length from information_schema.columns where table_schema=(select database()) and table_name='t'; +drop table t; + +# TestDDLJobs +set global tidb_ddl_enable_fast_reorg = false; +drop database if exists test_ddl_jobs; +create database test_ddl_jobs; +select db_name, job_type from information_schema.DDL_JOBS limit 1; +use test_ddl_jobs; +create table t (a int); +select db_name, table_name, job_type from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and table_name = 't'; +select job_type from information_schema.DDL_JOBS group by job_type having job_type = 'create table'; +select distinct job_type from information_schema.DDL_JOBS where job_type = 'create table' and start_time > str_to_date('20190101','%Y%m%d%H%i%s'); +drop user if exists DDL_JOBS_tester; +create user DDL_JOBS_tester; + +connect(conn1, localhost, DDL_JOBS_tester,, information_schema); +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; + +connection default; +CREATE ROLE r_priv; +GRANT ALL PRIVILEGES ON test_ddl_jobs.* TO r_priv; +GRANT r_priv TO DDL_JOBS_tester; + +connection conn1; +set role r_priv; +select DB_NAME, TABLE_NAME from information_schema.DDL_JOBS where DB_NAME = 'test_ddl_jobs' and TABLE_NAME = 't'; + +connection default; +create table tt (a int); +alter table tt add index t(a), add column b int; +select db_name, table_name, job_type from information_schema.DDL_JOBS limit 3; + +disconnect conn1; +drop database test_ddl_jobs; +use executor__infoschema_reader; +set global tidb_ddl_enable_fast_reorg = default; + +# TestKeyColumnUsage +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta' and COLUMN_NAME='table_id'; +create user key_column_tester; + +connect (conn1, localhost, key_column_tester,, information_schema); +select * from information_schema.KEY_COLUMN_USAGE where TABLE_NAME != 'CLUSTER_SLOW_QUERY'; + +connection default; +CREATE ROLE r_stats_meta ; +GRANT ALL PRIVILEGES ON mysql.stats_meta TO r_stats_meta; +GRANT r_stats_meta TO key_column_tester; + +connection conn1; +set role r_stats_meta; +select count(*)>0 from information_schema.KEY_COLUMN_USAGE where TABLE_NAME='stats_meta'; + +connection default; +disconnect conn1; + +# TestPartitionTablesStatsCache +# https://github.com/pingcap/tidb/issues/32693 +drop table if exists e, e2; +CREATE TABLE e ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)) PARTITION BY RANGE 
(id) ( + PARTITION p0 VALUES LESS THAN (50), + PARTITION p1 VALUES LESS THAN (100), + PARTITION p2 VALUES LESS THAN (150), + PARTITION p3 VALUES LESS THAN (MAXVALUE)); +CREATE TABLE e2 ( id INT NOT NULL, fname VARCHAR(30), lname VARCHAR(30)); +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e' and table_schema=(select database()); +INSERT INTO e VALUES (1669, "Jim", "Smith"), (337, "Mary", "Jones"), (16, "Frank", "White"), (2005, "Linda", "Black"); +set tidb_enable_exchange_partition='on'; +ALTER TABLE e EXCHANGE PARTITION p0 WITH TABLE e2; +INSERT INTO e VALUES (41, "Michael", "Green"); +analyze table e; +SELECT PARTITION_NAME, TABLE_ROWS FROM INFORMATION_SCHEMA.PARTITIONS WHERE TABLE_NAME = 'e'; +set tidb_enable_exchange_partition=default; + +# TestMetricTables +select count(*) > 0 from information_schema.`METRICS_TABLES`; +select * from information_schema.`METRICS_TABLES` where table_name='tidb_qps'; + +# TestTableConstraintsTable +select * from information_schema.TABLE_CONSTRAINTS where TABLE_NAME='gc_delete_range'; + +# TestTableSessionVar +select * from information_schema.SESSION_VARIABLES where VARIABLE_NAME='tidb_retry_limit'; + +# TestSequences +drop sequence if exists seq, seq2; +CREATE SEQUENCE seq maxvalue 10000000; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +DROP SEQUENCE seq; +CREATE SEQUENCE seq start = -1 minvalue -1 maxvalue 10 increment 1 cache 10; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq'; +CREATE SEQUENCE seq2 start = -9 minvalue -10 maxvalue 10 increment -1 cache 15; +SELECT * FROM information_schema.sequences WHERE sequence_schema='executor__infoschema_reader' AND sequence_name='seq2'; +SELECT TABLE_CATALOG, TABLE_SCHEMA, TABLE_NAME , TABLE_TYPE, ENGINE, TABLE_ROWS FROM information_schema.tables WHERE TABLE_TYPE='SEQUENCE' AND TABLE_NAME='seq2' and table_schema='executor__infoschema_reader'; + +# TestTablesPKType +drop table if exists t_int, t_implicit, t_common; +create table t_int (a int primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_int'; +set tidb_enable_clustered_index=int_only; +create table t_implicit (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_implicit'; +set tidb_enable_clustered_index=on; +create table t_common (a varchar(64) primary key, b int); +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'executor__infoschema_reader' and table_name = 't_common'; +SELECT TIDB_PK_TYPE FROM information_schema.tables where table_schema = 'INFORMATION_SCHEMA' and table_name = 'TABLES'; +set tidb_enable_clustered_index=default; + +# TestNullColumns +drop table if exists t; +CREATE TABLE t ( id int DEFAULT NULL); +CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`1.1.1.1` SQL SECURITY DEFINER VIEW `v_test` (`type`) AS SELECT NULL AS `type` FROM `t` AS `f`; +select * from information_schema.columns where TABLE_SCHEMA = 'executor__infoschema_reader' and TABLE_NAME = 'v_test'; + +# TestUserPrivilegesTable +drop user if exists usageuser; +create user usageuser; + +connect (conn1, localhost, usageuser,, information_schema); +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT SELECT ON *.* to usageuser; 
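+# The grant takes effect immediately: when the unprivileged session re-reads
+# user_privileges below, it should see PRIVILEGE_TYPE='SELECT' for GRANTEE
+# 'usageuser'@'%', with IS_GRANTABLE still 'NO' until the grant is re-issued
+# WITH GRANT OPTION.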
+ +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT SELECT ON *.* to usageuser WITH GRANT OPTION; + +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'"; + +connection default; +GRANT BACKUP_ADMIN ON *.* to usageuser; + +connection conn1; +SELECT * FROM information_schema.user_privileges WHERE grantee="'usageuser'@'%'" ORDER BY privilege_type; + +connection default; +disconnect conn1; diff --git a/tests/integrationtest/t/executor/insert.test b/tests/integrationtest/t/executor/insert.test new file mode 100644 index 0000000000000..89c9c16cd9269 --- /dev/null +++ b/tests/integrationtest/t/executor/insert.test @@ -0,0 +1,926 @@ +# TestClusterIndexInsertOnDuplicateKey +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(a char(20), b int, primary key(a)); +insert into t values('aa', 1), ('bb', 1); +-- error 1062 +insert into t values('aa', 2); +drop table t; +create table t(a char(20), b varchar(30), c varchar(10), primary key(a, b, c)); +insert into t values ('a', 'b', 'c'), ('b', 'a', 'c'); +-- error 1062 +insert into t values ('a', 'b', 'c'); +set tidb_enable_clustered_index = default; + +# TestPaddingCommonHandle +set tidb_enable_clustered_index = on; +drop table if exists t1; +create table t1(c1 decimal(6,4), primary key(c1)); +insert into t1 set c1 = 0.1; +insert into t1 set c1 = 0.1 on duplicate key update c1 = 1; +select * from t1; +set tidb_enable_clustered_index = default; + +# TestInsertReorgDelete +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2004'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = 2004; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 bit); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int unsigned); +insert into t1 set c1 = 1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 smallint); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 int); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal(6,4)); +insert into t1 set c1 = '1.1'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 decimal); +insert into t1 set c1 = 1.1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 numeric); +insert into t1 set c1 = -1; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 float); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 double); +insert into t1 set c1 = 1.2; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create 
table t1(c1 double); +insert into t1 set c1 = 1.3; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 real); +insert into t1 set c1 = 1.4; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 date); +insert into t1 set c1 = '2020-01-01'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 time); +insert into t1 set c1 = '20:00:00'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 datetime); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 timestamp); +insert into t1 set c1 = '2020-01-01 22:22:22'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 year); +insert into t1 set c1 = '2020'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 char(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varchar(15)); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 binary(3)); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 varbinary(3)); +insert into t1 set c1 = 'b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 blob); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 text); +insert into t1 set c1 = 'test'; +alter table t1 add index idx(c1(3)); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 enum('a', 'b')); +insert into t1 set c1 = 'a'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; +drop table if exists t1; +create table t1(c1 set('a', 'b')); +insert into t1 set c1 = 'a,b'; +alter table t1 add index idx(c1); +delete from t1; +admin check table t1; + +# TestUpdateDuplicateKey +drop table if exists c; +create table c(i int,j int,k int,primary key(i,j,k)); +insert into c values(1,2,3); +insert into c values(1,2,4); +-- error 1062 +update c set i=1,j=2,k=4 where i=1 and j=2 and k=3; + +# TestIssue37187 +drop table if exists t1, t2; +create table t1 (a int(11) ,b varchar(100) ,primary key (a)); +create table t2 (c int(11) ,d varchar(100) ,primary key (c)); +prepare in1 from 'insert into t1 (a,b) select c,null from t2 t on duplicate key update b=t.d'; +execute in1; + +# TestInsertWrongValueForField +drop table if exists t1; +create table t1(a bigint); +-- error 1366 +insert into t1 values("asfasdfsajhlkhlksdaf"); +drop table if exists t1; +create table t1(a varchar(10)) charset ascii; +-- error 1366 +insert into t1 values('我'); +drop table if exists t1; +create table t1(a char(10) charset utf8); +insert into t1 values('我'); +alter table t1 add column b char(10) charset ascii as ((a)); +select * from t1; +drop table if exists t; +create table t (a year); +-- error 1264 +insert into t values(2156); +DROP TABLE IF EXISTS ts; 
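+# The supported TIMESTAMP range starts at 1970-01-01 00:00:01 UTC, so the
+# 1018-* literals below are out of range: non-strict mode stores a zero value
+# and reports a warning, while STRICT_TRANS_TABLES rejects them with error 1292.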
+CREATE TABLE ts (id int DEFAULT NULL, time1 TIMESTAMP NULL DEFAULT NULL); +SET @@sql_mode=''; +INSERT INTO ts (id, time1) VALUES (1, TIMESTAMP '1018-12-23 00:00:00'); +SHOW WARNINGS; +SELECT * FROM ts ORDER BY id; +SET @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +INSERT INTO ts (id, time1) VALUES (2, TIMESTAMP '1018-12-24 00:00:00'); +DROP TABLE ts; +CREATE TABLE t0(c0 SMALLINT AUTO_INCREMENT PRIMARY KEY); +INSERT IGNORE INTO t0(c0) VALUES (194626268); +INSERT IGNORE INTO t0(c0) VALUES ('*'); +SHOW WARNINGS; +SET @@sql_mode=default; + +# TestInsertValueForCastDecimalField +drop table if exists t1; +create table t1(a decimal(15,2)); +insert into t1 values (1111111111111.01); +select * from t1; +select cast(a as decimal) from t1; + +# TestInsertForMultiValuedIndex +drop table if exists t1; +create table t1(a json, b int, unique index idx((cast(a as signed array)))); +insert into t1 values ('[1,11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +-- error 1062 +insert into t1 values ('[2, 222]', 2); +replace into t1 values ('[1, 10]', 10); +select * from t1; +replace into t1 values ('[1, 2]', 1); +select * from t1; +replace into t1 values ('[1, 11]', 1); +insert into t1 values ('[2, 22]', 2); +select * from t1; +insert ignore into t1 values ('[1]', 2); +select * from t1; +insert ignore into t1 values ('[1, 2]', 2); +select * from t1; +insert into t1 values ('[2]', 2) on duplicate key update b = 10; +select * from t1; +-- error 1062 +insert into t1 values ('[2, 1]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[1,2]', 2) on duplicate key update a = '[1,2]'; +-- error 1062 +insert into t1 values ('[11, 22]', 2) on duplicate key update a = '[1,2]'; + +# TestInsertDateTimeWithTimeZone +set time_zone="+09:00"; +drop table if exists t; +create table t (id int, c1 datetime not null default CURRENT_TIMESTAMP); +set TIMESTAMP = 1234; +insert t (id) values (1); +select * from t; +drop table if exists t; +create table t (dt datetime); +set @@time_zone='+08:00'; +delete from t; +insert into t values ('2020-10-22'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16:31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31:15-10'); +select * from t; +delete from t; +insert into t values ('2020.10-22'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16.31-15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+14'); +select * from t; +delete from t; +insert into t values ('2020-10:22'); +select * from t; +delete from t; +insert into t values ('2020-10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10-22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22 16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16.31.15+09:30'); +select * from t; +delete from t; +insert into t values ('2020.10-22:16'); +select * from t; +delete from t; +insert into t values ('2020-10.22-16:31'); +select * from t; +delete from t; +insert into t values ('2020-10-22.16-31:15'); +select * from t; +delete from t; +insert into t values ('2020-10-22T16:31.15+09:30'); +select * from t; +drop table if exists 
t; +create table t (dt datetime, ts timestamp); +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+00:00'; +select * from t; +delete from t; +set @@time_zone='-08:00'; +insert into t values ('2020-10-22T16:53:40Z', '2020-10-22T16:53:40Z'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='-03:00'; +insert into t values ('2020-10-22T16:53:40+03:00', '2020-10-22T16:53:40+03:00'); +set @@time_zone='+08:00'; +select * from t; +delete from t; +set @@time_zone='+08:00'; +insert into t values ('2020-10-22T16:53:40+08:00', '2020-10-22T16:53:40+08:00'); +set @@time_zone='+08:00'; +select * from t; +drop table if exists t; +create table t (ts timestamp); +insert into t values ('2020-10-22T12:00:00Z'), ('2020-10-22T13:00:00Z'), ('2020-10-22T14:00:00Z'); +select count(*) from t where ts > '2020-10-22T12:00:00Z'; +set @@time_zone='+08:00'; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10+00:00', '2020-10-27T14:39:10.10+00:00'); +select * from t; +drop table if exists t; +create table t (dt datetime(1), ts timestamp(1)); +insert into t values ('2020-10-27T14:39:10.3+0200', '2020-10-27T14:39:10.3+0200'); +select * from t; +drop table if exists t; +create table t (dt datetime(6), ts timestamp(6)); +insert into t values ('2020-10-27T14:39:10.3-02', '2020-10-27T14:39:10.3-02'); +select * from t; +drop table if exists t; +create table t (dt datetime(2), ts timestamp(2)); +insert into t values ('2020-10-27T14:39:10.10Z', '2020-10-27T14:39:10.10Z'); +select * from t; +set time_zone=default; +set timestamp=default; + +# TestInsertZeroYear +drop table if exists t1; +create table t1(a year(4)); +insert into t1 values(0000),(00),("0000"),("000"), ("00"), ("0"), (79), ("79"); +select * from t1; +drop table if exists t; +create table t(f_year year NOT NULL DEFAULT '0000')ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +insert into t values(); +select * from t; +insert into t values('0000'); +select * from t; + +# TestAllowInvalidDates +drop table if exists t1, t2, t3, t4; +create table t1(d date); +create table t2(d datetime); +create table t3(d date); +create table t4(d datetime); +set sql_mode='STRICT_TRANS_TABLES,ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d from t1; +select year(d), month(d), day(d) from t3; +insert t4 select d from t2; +select year(d), month(d), day(d) from t4; + +truncate t1;truncate t2;truncate t3;truncate t4; +set sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values ('0000-00-00'); +insert into t2 values ('0000-00-00'); +insert into t1 values ('2019-00-00'); +insert into t2 values ('2019-00-00'); +insert into t1 values ('2019-01-00'); +insert into t2 values ('2019-01-00'); +insert into t1 values ('2019-00-01'); +insert into t2 values ('2019-00-01'); +insert into t1 values ('2019-02-31'); +insert into t2 values ('2019-02-31'); +select year(d), month(d), day(d) from t1; +select year(d), month(d), day(d) from t2; +insert t3 select d 
from t1;
+select year(d), month(d), day(d) from t3;
+insert t4 select d from t2;
+select year(d), month(d), day(d) from t4;
+set sql_mode=default;
+
+# TestPartitionInsertOnDuplicate
+drop table if exists t1, t2, t3;
+create table t1 (a int,b int,primary key(a,b)) partition by range(a) (partition p0 values less than (100),partition p1 values less than (1000));
+insert into t1 set a=1, b=1;
+insert into t1 set a=1,b=1 on duplicate key update a=1,b=1;
+select * from t1;
+create table t2 (a int,b int,primary key(a,b)) partition by hash(a) partitions 4;
+insert into t2 set a=1,b=1;
+insert into t2 set a=1,b=1 on duplicate key update a=1,b=1;
+select * from t2;
+CREATE TABLE t3 (a int, b int, c int, d int, e int,
+ PRIMARY KEY (a,b),
+ UNIQUE KEY (b,c,d)
+) PARTITION BY RANGE ( b ) (
+ PARTITION p0 VALUES LESS THAN (4),
+ PARTITION p1 VALUES LESS THAN (7),
+ PARTITION p2 VALUES LESS THAN (11)
+);
+insert into t3 values (1,2,3,4,5);
+insert into t3 values (1,2,3,4,5),(6,2,3,4,6) on duplicate key update e = e + values(e);
+select * from t3;
+
+# TestBit
+drop table if exists t1;
+create table t1 (a bit(3));
+-- error 1406
+insert into t1 values(-1);
+-- error 1406
+insert into t1 values(9);
+create table t64 (a bit(64));
+insert into t64 values(-1);
+insert into t64 values(18446744073709551615);
+-- error 1264
+insert into t64 values(18446744073709551616);
+
+# TestJiraIssue5366
+drop table if exists bug;
+create table bug (a varchar(100));
+insert into bug select ifnull(JSON_UNQUOTE(JSON_EXTRACT('[{"amount":2000,"feeAmount":0,"merchantNo":"20190430140319679394","shareBizCode":"20160311162_SECOND"}]', '$[0].merchantNo')),'') merchant_no union SELECT '20180531557' merchant_no;
+--sorted_result
+select * from bug;
+
+# TestDMLCast
+drop table if exists t;
+create table t (a int, b double);
+insert into t values (ifnull('',0)+0, 0);
+insert into t values (0, ifnull('',0)+0);
+select * from t;
+-- error 1366
+insert into t values ('', 0);
+-- error 1366
+insert into t values (0, '');
+-- error 1292
+update t set a = '';
+-- error 1292
+update t set b = '';
+update t set a = ifnull('',0)+0;
+update t set b = ifnull('',0)+0;
+delete from t where a = '';
+select * from t;
+
+# TestInsertFloatOverflow
+drop table if exists t,t1;
+create table t(col1 FLOAT, col2 FLOAT(10,2), col3 DOUBLE, col4 DOUBLE(10,2), col5 DECIMAL, col6 DECIMAL(10,2));
+-- error 1264
+insert into t values (-3.402823466E+68, -34028234.6611, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);
+-- error 1264
+insert into t values (-34028234.6611, -3.402823466E+68, -1.7976931348623157E+308, -17976921.34, -9999999999, -99999999.99);
+create table t1(id1 float,id2 float);
+insert ignore into t1 values(999999999999999999999999999999999999999,-999999999999999999999999999999999999999);
+select @@warning_count;
+select convert(id1,decimal(65)),convert(id2,decimal(65)) from t1;
+
+# TestTextTooLongError
+# Fix https://github.com/pingcap/tidb/issues/32601
+set sql_mode = 'ONLY_FULL_GROUP_BY,STRICT_ALL_TABLES,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
+# The default max_allowed_packet is large enough for the tinytext and text cases below to be tested correctly.
+drop table if exists t1;
+CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));
+drop table if exists t1;
+CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));
+drop table if exists t1;
+CREATE TABLE t1(c1 mediumtext);
+-- error 1406
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 8777215));
+# For longtext, the default max_allowed_packet cannot accommodate a 4GB packet, so that case is skipped.
+# With non-strict sql_mode, the value should be truncated instead of raising an error.
+set sql_mode = 'ONLY_FULL_GROUP_BY,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION';
+drop table if exists t1;
+CREATE TABLE t1(c1 TINYTEXT CHARACTER SET utf8mb4);
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 128));
+select length(c1) from t1;
+drop table if exists t1;
+CREATE TABLE t1(c1 Text CHARACTER SET utf8mb4);
+INSERT INTO t1 (c1) VALUES(REPEAT(X'C385', 32768));
+select length(c1) from t1;
+# For mediumtext and larger types, the TiKV entry-size limit is hit first (ERROR 8025 (HY000): entry too large, the max entry size is 6291456, the size of data is 16777247), so no need to test them here.
+set sql_mode = default;
+
+# TestAutoRandomIDExplicit
+set @@allow_auto_random_explicit_insert = true;
+drop table if exists ar;
+create table ar (id bigint key clustered auto_random, name char(10));
+insert into ar(id) values (1);
+select id from ar;
+select last_insert_id();
+delete from ar;
+insert into ar(id) values (1), (2);
+select id from ar;
+select last_insert_id();
+delete from ar;
+drop table ar;
+set @@allow_auto_random_explicit_insert = default;
+
+# TestInsertErrorMsg
+drop table if exists t, t1;
+create table t (a int primary key, b datetime, d date);
+-- error 1292
+insert into t values (1, '2019-02-11 30:00:00', '2019-01-31');
+CREATE TABLE t1 (a BINARY(16) PRIMARY KEY);
+INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));
+-- error 1062
+INSERT INTO t1 VALUES (AES_ENCRYPT('a','a'));
+INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));
+-- error 1062
+INSERT INTO t1 VALUES (AES_ENCRYPT('b','b'));
+drop table if exists t1;
+create table t1 (a bit primary key) engine=innodb;
+insert into t1 values (b'0');
+-- error 1062
+insert into t1 values (b'0');
+
+# TestIssue16366
+drop table if exists t;
+create table t(c numeric primary key);
+insert ignore into t values(null);
+-- error 1062
+insert into t values(0);
+
+# TestClusterPrimaryTablePlainInsert
+set tidb_enable_clustered_index = on;
+drop table if exists t1pk;
+create table t1pk(id varchar(200) primary key, v int);
+insert into t1pk(id, v) values('abc', 1);
+select * from t1pk;
+set @@tidb_constraint_check_in_place=true;
+-- error 1062
+insert into t1pk(id, v) values('abc', 2);
+set @@tidb_constraint_check_in_place=false;
+-- error 1062
+insert into t1pk(id, v) values('abc', 3);
+select v, id from t1pk;
+select id from t1pk where id = 'abc';
+select v, id from t1pk where id = 'abc';
+drop table if exists t3pk;
+create table t3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3));
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 1);
+select * from t3pk;
+set @@tidb_constraint_check_in_place=true;
+-- error 1062
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 2);
+set @@tidb_constraint_check_in_place=false;
+-- error 1062
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 100, 3);
+select v, id3, id2, id1 from t3pk;
+select id3, id2, id1 from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc';
+select id3, id2, id1, v from t3pk where id3 = 100 and id2 = 'xyz' and id1 = 'abc';
+insert into t3pk(id1, id2, id3, v) values('abc', 'xyz', 101, 1);
+insert into t3pk(id1, id2, id3, v) values('abc', 'zzz', 101, 1);
+drop table if exists t1pku;
+create table t1pku(id varchar(200) primary key, uk int, v int,
unique key ukk(uk)); +insert into t1pku(id, uk, v) values('abc', 1, 2); +select * from t1pku where id = 'abc'; +-- error 1062 +insert into t1pku(id, uk, v) values('aaa', 1, 3); +select * from t1pku; +select * from t3pk where (id1, id2, id3) in (('abc', 'xyz', 100), ('abc', 'xyz', 101), ('abc', 'zzz', 101)); +set @@tidb_constraint_check_in_place=default; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertIgnore +set tidb_enable_clustered_index = on; +drop table if exists it1pk; +create table it1pk(id varchar(200) primary key, v int); +insert into it1pk(id, v) values('abc', 1); +insert ignore into it1pk(id, v) values('abc', 2); +select * from it1pk where id = 'abc'; +drop table if exists it2pk; +create table it2pk(id1 varchar(200), id2 varchar(200), v int, primary key(id1, id2)); +insert into it2pk(id1, id2, v) values('abc', 'cba', 1); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +insert ignore into it2pk(id1, id2, v) values('abc', 'cba', 2); +select * from it2pk where id1 = 'abc' and id2 = 'cba'; +drop table if exists it1pku; +create table it1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into it1pku(id, uk, v) values('abc', 1, 2); +select * from it1pku where id = 'abc'; +insert ignore into it1pku(id, uk, v) values('aaa', 1, 3), ('bbb', 2, 1); +select * from it1pku; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryTableInsertDuplicate +set tidb_enable_clustered_index = on; +drop table if exists dt1pi; +create table dt1pi(id varchar(200) primary key, v int); +insert into dt1pi(id, v) values('abb', 1),('acc', 2); +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1; +select * from dt1pi; +insert into dt1pi(id, v) values('abb', 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1pi; +drop table if exists dt1piu; +create table dt1piu(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt1piu(id, uk, v) values('abb', 1, 10),('acc', 2, 20); +insert into dt1piu(id, uk, v) values('xyz', 1, 100) on duplicate key update v = v + 1; +select * from dt1piu; +insert into dt1piu(id, uk, v) values('abb', 1, 2) on duplicate key update v = v + 1, id = 'xxx'; +select * from dt1piu; +drop table if exists ts1pk; +create table ts1pk(id1 timestamp, id2 timestamp, v int, primary key(id1, id2)); +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 1); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v); +select id1, id2, v from ts1pk; +insert into ts1pk (id1, id2, v) values('2018-01-01 11:11:11', '2018-01-01 11:11:11', 2) on duplicate key update v = values(v), id1 = '2018-01-01 11:11:12'; +select id1, id2, v from ts1pk; +set tidb_enable_clustered_index = default; + +# TestClusterPrimaryKeyForIndexScan +set tidb_enable_clustered_index = on; +drop table if exists pkt1; +CREATE TABLE pkt1 (a varchar(255), b int, index idx(b), primary key(a,b)); +insert into pkt1 values ('aaa',1); +select b from pkt1 where b = 1; +drop table if exists pkt2; +CREATE TABLE pkt2 (a varchar(255), b int, unique index idx(b), primary key(a,b)); +insert into pkt2 values ('aaa',1); +select b from pkt2 where b = 1; +drop table if exists issue_18232; +create table issue_18232 (a int, b int, c int, d int, primary key (a, b), index idx(c)); +select a from issue_18232 use index (idx); +select b from issue_18232 use index (idx); +select a,b from issue_18232 use 
index (idx); +select c from issue_18232 use index (idx); +select a,c from issue_18232 use index (idx); +select b,c from issue_18232 use index (idx); +select a,b,c from issue_18232 use index (idx); +select d from issue_18232 use index (idx); +select a,d from issue_18232 use index (idx); +select b,d from issue_18232 use index (idx); +select a,b,d from issue_18232 use index (idx); +select c,d from issue_18232 use index (idx); +select a,c,d from issue_18232 use index (idx); +select b,c,d from issue_18232 use index (idx); +select a,b,c,d from issue_18232 use index (idx); +set tidb_enable_clustered_index = default; + +# TestIssue20768 +drop table if exists t1, t2; +create table t1(a year, primary key(a)); +insert ignore into t1 values(null); +create table t2(a int, key(a)); +insert into t2 values(0); +select /*+ hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_join(t2) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ inl_merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; +select /*+ merge_join(t1) */ * from t1 join t2 on t1.a = t2.a; + +# TestIssue10402 +drop table if exists vctt; +create table vctt (v varchar(4), c char(4)); +insert into vctt values ('ab ', 'ab '); +select * from vctt; +delete from vctt; +insert into vctt values ('ab\n\n\n', 'ab\n\n\n'), ('ab\t\t\t', 'ab\t\t\t'), ('ab ', 'ab '), ('ab\r\r\r', 'ab\r\r\r'); +show warnings; +select * from vctt; +select length(v), length(c) from vctt; + +# TestDuplicatedEntryErr +# See https://github.com/pingcap/tidb/issues/24582 +drop table if exists t1; +create table t1(a int, b varchar(20), primary key(a,b(3)) clustered); +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 values(1,'aaaaa'); +-- error 1062 +insert into t1 select 1, 'aaa'; +insert into t1 select 1, 'bb'; +-- error 1062 +insert into t1 select 1, 'bb'; + +# TestBinaryLiteralInsertToEnum +drop table if exists bintest; +create table bintest (h enum(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestBinaryLiteralInsertToSet +drop table if exists bintest; +create table bintest (h set(0x61, '1', 'b')) character set utf8mb4; +insert into bintest(h) values(0x61); +select * from bintest; + +# TestGlobalTempTableAutoInc +drop table if exists temp_test; +create global temporary table temp_test(id int primary key auto_increment) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select * from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select * from temp_test; +commit; + +## AutoID allocator is cleared. 
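+## (i.e. the next transaction starts allocating from the beginning again; an
+## on-commit-delete-rows temporary table keeps no allocator state across
+## transactions)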
+begin; +insert into temp_test(id) values(0); +select * from temp_test; +## Test whether auto-inc is incremental +insert into temp_test(id) values(0); +select id from temp_test order by id; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; + +## rebase +begin; +insert into temp_test(id) values(10); +insert into temp_test(id) values(0); +select id from temp_test order by id; +insert into temp_test(id) values(20), (30); +insert into temp_test(id) values(0), (0); +select id from temp_test order by id; +commit; +drop table if exists temp_test; + +# TestGlobalTempTableRowID +drop table if exists temp_test; +create global temporary table temp_test(id int) on commit delete rows; + +## Data is cleared after transaction auto commits. +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; + +## Data is not cleared inside a transaction. +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +commit; + +## AutoID allocator is cleared. +begin; +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test; +## Test whether row id is incremental +insert into temp_test(id) values(0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; + +## multi-value insert +begin; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +insert into temp_test(id) values(0), (0); +select _tidb_rowid from temp_test order by _tidb_rowid; +commit; +drop table if exists temp_test; + +# TestIssue26762 +drop table if exists t1; +create table t1(c1 date); +-- error 1292 +insert into t1 values('2020-02-31'); +set @@sql_mode='ALLOW_INVALID_DATES'; +insert into t1 values('2020-02-31'); +select * from t1; +set @@sql_mode='STRICT_TRANS_TABLES'; +-- error 1292 +insert into t1 values('2020-02-31'); +set sql_mode=default; + +# TestStringtoDecimal +drop table if exists t; +create table t (id decimal(10)); +-- error 1366 +insert into t values('1sdf'); +-- error 1366 +insert into t values('1edf'); +-- error 1366 +insert into t values('12Ea'); +-- error 1366 +insert into t values('1E'); +-- error 1366 +insert into t values('1e'); +-- error 1366 +insert into t values('1.2A'); +-- error 1366 +insert into t values('1.2.3.4.5'); +-- error 1366 +insert into t values('1.2.'); +-- error 1366 +insert into t values('1,999.00'); +## TODO: MySQL8.0 reports Note 1265 Data truncated for column 'id' at row 1 +insert into t values('12e-3'); +show warnings; +select id from t; +drop table if exists t; + +# TestReplaceAllocatingAutoID +# https://github.com/pingcap/tidb/issues/29483 +SET sql_mode='NO_ENGINE_SUBSTITUTION'; +DROP TABLE IF EXISTS t1; +CREATE TABLE t1 (a tinyint not null auto_increment primary key, b char(20)); +INSERT INTO t1 VALUES (127,'maxvalue'); +## Note that this error is different from MySQL's duplicated primary key error +-- error 1467 +REPLACE INTO t1 VALUES (0,'newmaxvalue'); +set sql_mode=default; + +# TestInsertIntoSelectError +DROP TABLE IF EXISTS t1; +CREATE TABLE t1(a INT) ENGINE = InnoDB; +INSERT IGNORE into t1(SELECT SLEEP(NULL)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(-1)); +SHOW WARNINGS; +INSERT IGNORE into t1(SELECT SLEEP(1)); +SELECT * FROM t1; +DROP TABLE t1; + +# TestIssue32213 +drop table if exists t1; +create table t1(c1 float); +insert into t1 values(999.99); +select cast(t1.c1 as decimal(4, 1)) from t1; +select cast(t1.c1 as 
decimal(5, 1)) from t1; +drop table if exists t1; +create table t1(c1 decimal(6, 4)); +insert into t1 values(99.9999); +select cast(t1.c1 as decimal(5, 3)) from t1; +select cast(t1.c1 as decimal(6, 3)) from t1; + +# TestInsertBigScientificNotation +# https://github.com/pingcap/tidb/issues/47787 +drop table if exists t1; +create table t1(id int, a int); +set @@SQL_MODE='STRICT_TRANS_TABLES'; +-- error 1264 +insert into t1 values(1, '1e100'); +-- error 1264 +insert into t1 values(2, '-1e100'); +select id, a from t1; +set @@SQL_MODE=''; +insert into t1 values(1, '1e100'); +show warnings; +insert into t1 values(2, '-1e100'); +show warnings; +select id, a from t1 order by id asc; +set sql_mode=default; + +# TestUnsignedDecimalFloatInsertNegative +# https://github.com/pingcap/tidb/issues/47945 +drop table if exists tf; +create table tf(a float(1, 0) unsigned); +-- error 1264 +insert into tf values('-100'); +set @@sql_mode=''; +insert into tf values('-100'); +select * from tf; +set @@sql_mode=default; + diff --git a/tests/integrationtest/t/executor/inspection_common.test b/tests/integrationtest/t/executor/inspection_common.test new file mode 100644 index 0000000000000..5b70a3a8f1fae --- /dev/null +++ b/tests/integrationtest/t/executor/inspection_common.test @@ -0,0 +1,6 @@ +# TestInspectionRules +select count(*) from information_schema.inspection_rules; +select count(*) from information_schema.inspection_rules where type='inspection'; +select count(*) from information_schema.inspection_rules where type='summary'; +select count(*) from information_schema.inspection_rules where type='inspection' and type='summary'; + diff --git a/tests/integrationtest/t/executor/jointest/hash_join.test b/tests/integrationtest/t/executor/jointest/hash_join.test new file mode 100644 index 0000000000000..54e82c01fc6fa --- /dev/null +++ b/tests/integrationtest/t/executor/jointest/hash_join.test @@ -0,0 +1,165 @@ +# TestIssue13449 +drop table if exists t, s; +create table t(a int, index(a)); +create table s(a int, index(a)); +insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9), (10), (11), (12), (13), (14), (15), (16), (17), (18), (19), (20), (21), (22), (23), (24), (25), (26), (27), (28), (29), (30), (31), (32), (33), (34), (35), (36), (37), (38), (39), (40), (41), (42), (43), (44), (45), (46), (47), (48), (49), (50), (51), (52), (53), (54), (55), (56), (57), (58), (59), (60), (61), (62), (63), (64), (65), (66), (67), (68), (69), (70), (71), (72), (73), (74), (75), (76), (77), (78), (79), (80), (81), (82), (83), (84), (85), (86), (87), (88), (89), (90), (91), (92), (93), (94), (95), (96), (97), (98), (99), (100), (101), (102), (103), (104), (105), (106), (107), (108), (109), (110), (111), (112), (113), (114), (115), (116), (117), (118), (119), (120), (121), (122), (123), (124), (125), (126), (127), (128); +insert into s values(1), (128); +set @@tidb_max_chunk_size=32; +set @@tidb_index_lookup_join_concurrency=1; +set @@tidb_index_join_batch_size=32; +desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a; +select /*+ INL_HASH_JOIN(s) */ * from t join s on t.a=s.a order by t.a; + +set @@tidb_max_chunk_size=default; +set @@tidb_index_lookup_join_concurrency=default; +set @@tidb_index_join_batch_size=default; + +# TestHashJoinExecEncodeDecodeRow +drop table if exists t1, t2; +create table t1 (id int); +create table t2 (id int, name varchar(255), ts timestamp); +insert into t1 values (1); +insert into t2 values (1, 'xxx', '2003-06-09 10:51:26'); +select ts from t1 inner 
join t2 where t2.name = 'xxx'; + +# TestIndexLookupJoin +set tidb_cost_model_version=2; +set @@tidb_init_chunk_size=2; +DROP TABLE IF EXISTS t; +CREATE TABLE `t` (`a` int, pk integer auto_increment,`b` char (20),primary key (pk)); +CREATE INDEX idx_t_a ON t(`a`); +CREATE INDEX idx_t_b ON t(`b`); +INSERT INTO t VALUES (148307968, DEFAULT, 'nndsjofmpdxvhqv') , (-1327693824, DEFAULT, 'pnndsjofmpdxvhqvfny') , (-277544960, DEFAULT, 'fpnndsjo'); +DROP TABLE IF EXISTS s; +CREATE TABLE `s` (`a` int, `b` char (20)); +CREATE INDEX idx_s_a ON s(`a`); +INSERT INTO s VALUES (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (2, 'vtdiockfpn'), (-277544960, 'fpnndsjo') , (2, 'kfpnndsjof') , (6, 'ckfp'); +--sorted_result +select /*+ INL_JOIN(t, s) */ t.a from t join s on t.a = s.a; +--sorted_result +select /*+ INL_HASH_JOIN(t, s) */ t.a from t join s on t.a = s.a; +--sorted_result +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t join s on t.a = s.a; +--sorted_result +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a; +--sorted_result +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a; +--sorted_result +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a; +--sorted_result +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +--sorted_result +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +--sorted_result +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a where t.a = -277544960; +--sorted_result +select /*+ INL_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +--sorted_result +select /*+ INL_HASH_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +--sorted_result +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t right join s on t.a = s.a; +select /*+ INL_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +select /*+ INL_HASH_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +select /*+ INL_MERGE_JOIN(t, s) */ t.a from t left join s on t.a = s.a order by t.a desc; +DROP TABLE IF EXISTS t; +CREATE TABLE t(a BIGINT PRIMARY KEY, b BIGINT); +INSERT INTO t VALUES(1, 2); +--sorted_result +SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +--sorted_result +SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_HASH_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +--sorted_result +SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a UNION ALL SELECT /*+ INL_MERGE_JOIN(t1, t2) */ * FROM t t1 JOIN t t2 ON t1.a=t2.a; +drop table if exists t; +create table t(a decimal(6,2), index idx(a)); +insert into t values(1.01), (2.02), (NULL); +select /*+ INL_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +select /*+ INL_HASH_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +select /*+ INL_MERGE_JOIN(t2) */ t1.a from t t1 join t t2 on t1.a=t2.a order by t1.a; +drop table if exists t; +create table t(a bigint, b bigint, unique key idx1(a, b)); +insert into t values(1, 1), (1, 2), (1, 3), (1, 4), (1, 5), (1, 6); +set @@tidb_init_chunk_size = 2; +--sorted_result +select /*+ INL_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4; +--sorted_result +select /*+ INL_HASH_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and t1.b = t2.b + 4; +--sorted_result +select /*+ INL_MERGE_JOIN(t2) */ * from t t1 left join t t2 on t1.a = t2.a and 
t1.b = t2.b + 4; +drop table if exists t1, t2, t3; +create table t1(a int primary key, b int); +insert into t1 values(1, 0), (2, null); +create table t2(a int primary key); +insert into t2 values(0); +--sorted_result +select /*+ INL_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +--sorted_result +select /*+ INL_HASH_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +--sorted_result +select /*+ INL_MERGE_JOIN(t2)*/ * from t1 left join t2 on t1.b = t2.a; +create table t3(a int, key(a)); +insert into t3 values(0); +--sorted_result +select /*+ INL_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a; +--sorted_result +select /*+ INL_HASH_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a; +--sorted_result +select /*+ INL_MERGE_JOIN(t3)*/ * from t1 left join t3 on t1.b = t3.a; +drop table if exists t,s; +create table t(a int primary key auto_increment, b time); +create table s(a int, b time); +alter table s add index idx(a,b); +set @@tidb_index_join_batch_size=4; +set @@tidb_init_chunk_size=1; +set @@tidb_max_chunk_size=32; +set @@tidb_index_lookup_join_concurrency=15; +set @@session.tidb_executor_concurrency = 4; +set @@session.tidb_hash_join_concurrency = 5; +insert into t values(0, '01:01:01'); +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into t select 0, b + 1 from t; +insert into s select a, b - 1 from t; +analyze table t; +analyze table s; +desc format = 'brief' select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +--sorted_result +select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +set @@tidb_index_lookup_join_concurrency=1; +--sorted_result +select /*+ TIDB_INLJ(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +desc format = 'brief' select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +--sorted_result +select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +set @@tidb_index_lookup_join_concurrency=1; +--sorted_result +select /*+ INL_MERGE_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +desc format = 'brief' select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +--sorted_result +select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +set @@tidb_index_lookup_join_concurrency=1; +--sorted_result +select /*+ INL_HASH_JOIN(s) */ count(*) from t join s use index(idx) on s.a = t.a and s.b < t.b; +drop table t1, t2; +create table t1(id int primary key); +create table t2(a int, b int); +insert into t1 values(1); +insert into t2 values(1,1),(2,1); +--sorted_result +select /*+ inl_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; +--sorted_result +select /*+ inl_hash_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; +--sorted_result +select /*+ inl_merge_join(t1)*/ * from t1 join t2 on t2.b=t1.id and t2.a=t1.id; + +set tidb_cost_model_version=default; +set @@tidb_init_chunk_size=default; +set @@tidb_index_join_batch_size=default; +set @@tidb_init_chunk_size=default; +set @@tidb_max_chunk_size=default; +set @@tidb_index_lookup_join_concurrency=default; +set @@session.tidb_executor_concurrency = default; +set @@session.tidb_hash_join_concurrency = default; diff --git a/tests/integrationtest/t/executor/join.test 
b/tests/integrationtest/t/executor/jointest/join.test similarity index 96% rename from tests/integrationtest/t/executor/join.test rename to tests/integrationtest/t/executor/jointest/join.test index e6fac7c4f1434..e94d4207335b8 100644 --- a/tests/integrationtest/t/executor/join.test +++ b/tests/integrationtest/t/executor/jointest/join.test @@ -569,3 +569,20 @@ select * from (t1 natural join t2) left outer join (t3 natural join t4) using (b --sorted_result select * from (t1 natural join t2) right outer join (t3 natural join t4) using (c,b); +# TestIssue19410 +drop table if exists t, t1, t2, t3; +create table t(a int, b enum('A', 'B')); +create table t1(a1 int, b1 enum('B', 'A') NOT NULL, UNIQUE KEY (b1)); +insert into t values (1, 'A'); +insert into t1 values (1, 'A'); +select /*+ INL_HASH_JOIN(t1) */ * from t join t1 on t.b = t1.b1; +select /*+ INL_JOIN(t1) */ * from t join t1 on t.b = t1.b1; +create table t2(a1 int, b1 enum('C', 'D') NOT NULL, UNIQUE KEY (b1)); +insert into t2 values (1, 'C'); +select /*+ INL_HASH_JOIN(t2) */ * from t join t2 on t.b = t2.b1; +select /*+ INL_JOIN(t2) */ * from t join t2 on t.b = t2.b1; +create table t3(a1 int, b1 enum('A', 'B') NOT NULL, UNIQUE KEY (b1)); +insert into t3 values (1, 'A'); +select /*+ INL_HASH_JOIN(t3) */ * from t join t3 on t.b = t3.b1; +select /*+ INL_JOIN(t3) */ * from t join t3 on t.b = t3.b1; + diff --git a/tests/integrationtest/t/executor/merge_join.test b/tests/integrationtest/t/executor/merge_join.test new file mode 100644 index 0000000000000..7e20d21501823 --- /dev/null +++ b/tests/integrationtest/t/executor/merge_join.test @@ -0,0 +1,287 @@ +# TestMergeJoin +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); +create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from 
t1 a join t1 b on a.c1 = b.c1; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +## Test LogicalSelection under LogicalJoin. +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +begin; +## Test LogicalLock under LogicalJoin. +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +## Test LogicalUnionScan under LogicalJoin. +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +## Test TIDB_SMJ for cartesian product. +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int); +insert into t values(1,1),(1,2); +create table s(a int, b int); +insert into s values(1,1); +explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t; +## Test TIDB_SMJ for join with order by desc, see https://github.com/pingcap/tidb/issues/14483 +drop table if exists t; +drop table if exists t1; +create table t (a int, key(a)); +create table t1 (a int, key(a)); +insert into t values (1), (2), (3); +insert into t1 values (1), (2), (3); +select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc; +drop table if exists t; +create table t (a int, b int, key(a), key(b)); +insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3); +select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc; +drop table if exists s; +create table s (a int); +insert into s values (4), (1), (3), (2); +explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; +select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc; + +# TestShuffleMergeJoin +## Same as TestMergeJoin except `tidb_merge_join_concurrency = 4;` +set @@session.tidb_merge_join_concurrency = 4; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, c2 int); 
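+## (with a concurrency of 4 the plan is expected to add a Shuffle operator on
+## top of the merge join; the row results must match the serial run above)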
+create table t1(c1 int, c2 int); +insert into t values(1,1),(2,2); +insert into t1 values(2,3),(4,4); +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t1 right outer join t on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +select /*+ TIDB_SMJ(t) */ * from t right outer join t1 on t.c1 = t1.c1 where t.c1 = 1 or t1.c2 > 20; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 where t1.c1 = 3 or false; +explain format = 'brief' select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +select /*+ TIDB_SMJ(t) */ * from t left outer join t1 on t.c1 = t1.c1 and t.c1 != 1 order by t1.c1; +drop table if exists t1; +drop table if exists t2; +drop table if exists t3; +create table t1 (c1 int, c2 int); +create table t2 (c1 int, c2 int); +create table t3 (c1 int, c2 int); +insert into t1 values (1,1), (2,2), (3,3); +insert into t2 values (1,1), (3,3), (5,5); +insert into t3 values (1,1), (5,5), (9,9); +select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 left join t2 on t1.c1 = t2.c1 right join t3 on t2.c1 = t3.c1 order by t1.c1, t1.c2, t2.c1, t2.c2, t3.c1, t3.c2; +drop table if exists t1; +create table t1 (c1 int); +insert into t1 values (1), (1), (1); +select/*+ TIDB_SMJ(t) */ * from t1 a join t1 b on a.c1 = b.c1; +drop table if exists t; +drop table if exists t1; +create table t(c1 int, index k(c1)); +create table t1(c1 int); +insert into t values (1),(2),(3),(4),(5),(6),(7); +insert into t1 values (1),(2),(3),(4),(5),(6),(7); +select /*+ TIDB_SMJ(a,b) */ a.c1 from t a , t1 b where a.c1 = b.c1 order by a.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 order by b.c1; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 limit 3) b where a.c1 = b.c1 and b.c1 is not null order by b.c1; +begin; +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , (select * from t1 for update) b where a.c1 = b.c1 order by a.c1; +insert into t1 values(8); +select /*+ TIDB_SMJ(a, b) */ a.c1 from t a , t1 b where a.c1 = b.c1; +rollback; +drop table if exists t; +drop table if exists t1; +create table t(c1 int); +create table t1(c1 int unsigned); +insert into t values (1); +insert into t1 values (1); +select /*+ TIDB_SMJ(t,t1) */ t.c1 from t , t1 where t.c1 = t1.c1; +drop table if exists t; +create table t(a int, b int, index a(a), index b(b)); +insert into t values(1, 2); +select /*+ TIDB_SMJ(t, t1) */ t.a, t1.b from t right join t t1 on t.a = t1.b order by t.a; +drop table if exists t; +drop table if exists s; +create table t(a int, b int, primary key(a, b)); +insert into t value(1,1),(1,2),(1,3),(1,4); +create table s(a int, primary key(a)); +insert into s value(1); +select /*+ TIDB_SMJ(t, s) */ count(*) from t join s on t.a = s.a; +drop table if exists t; +create table t(a int); +insert into t value(1),(2); +explain format = 'brief' select /*+ TIDB_SMJ(t1, t2) */ * from t t1 join t t2 order by t1.a, t2.a; +select /*+ TIDB_SMJ(t1, t2) */ * from t 
t1 join t t2 order by t1.a, t2.a;
+drop table if exists t;
+drop table if exists s;
+create table t(a int, b int);
+insert into t values(1,1),(1,2);
+create table s(a int, b int);
+insert into s values(1,1);
+explain format = 'brief' select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t;
+select /*+ TIDB_SMJ(t, s) */ a in (select a from s where s.b >= t.b) from t;
+drop table if exists t;
+drop table if exists t1;
+create table t (a int, key(a));
+create table t1 (a int, key(a));
+insert into t values (1), (2), (3);
+insert into t1 values (1), (2), (3);
+select /*+ TIDB_SMJ(t1, t2) */ t.a from t, t1 where t.a = t1.a order by t1.a desc;
+drop table if exists t;
+create table t (a int, b int, key(a), key(b));
+insert into t values (1,1),(1,2),(1,3),(2,1),(2,2),(3,1),(3,2),(3,3);
+select /*+ TIDB_SMJ(t1, t2) */ t1.a from t t1, t t2 where t1.a = t2.b order by t1.a desc;
+drop table if exists s;
+create table s (a int);
+insert into s values (4), (1), (3), (2);
+explain format = 'brief' select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc;
+select s1.a1 from (select a as a1 from s order by s.a desc) as s1 join (select a as a2 from s order by s.a desc) as s2 on s1.a1 = s2.a2 order by s1.a1 desc;
+set @@session.tidb_merge_join_concurrency = default;
+
+# Test3WaysMergeJoin
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists t3;
+create table t1(c1 int, c2 int, PRIMARY KEY (c1));
+create table t2(c1 int, c2 int, PRIMARY KEY (c1));
+create table t3(c1 int, c2 int, PRIMARY KEY (c1));
+insert into t1 values(1,1),(2,2),(3,3);
+insert into t2 values(2,3),(3,4),(4,5);
+insert into t3 values(1,2),(2,4),(3,10);
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+# In the case below, the t1 side is filled with NULLs when a row has no join
+# match, so its order is not preserved and a final Sort is appended; when t1's
+# order is preserved, no final Sort is appended.
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+
+# Test3WaysShuffleMergeJoin
+set @@session.tidb_merge_join_concurrency = 4;
+drop table if exists t1;
+drop table if exists t2;
+drop table if exists t3;
+create table t1(c1 int, c2 int, PRIMARY KEY (c1));
+create table t2(c1 int, c2 int, PRIMARY KEY (c1));
+create table t3(c1 int, c2 int, PRIMARY KEY (c1));
+insert into t1 values(1,1),(2,2),(3,3);
+insert into t2 values(2,3),(3,4),(4,5);
+insert into t3 values(1,2),(2,4),(3,10);
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t2.c1 = t3.c1 order by 1;
+# In the case below, the t1 side is filled with NULLs when a row has no join
+# match, so its order is not preserved and a final Sort is appended; when t1's
+# order is preserved, no final Sort is appended.
+explain format = 'brief' select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+select /*+ TIDB_SMJ(t1,t2,t3) */ * from t1 right outer join t2 on t1.c1 = t2.c1 join t3 on t1.c1 = t3.c1 order by 1;
+set @@session.tidb_merge_join_concurrency = default;
+
+# TestMergeJoinDifferentTypes
+set @@session.tidb_executor_concurrency = 4;
+set @@session.tidb_hash_join_concurrency = 5;
+set @@session.tidb_distsql_scan_concurrency = 15;
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a bigint, b bit(1), index idx_a(a));
+create table t2(a bit(1) not null, b bit(1), index idx_a(a));
+insert into t1 values(1, 1);
+insert into t2 values(1, 1);
+select hex(t1.a), hex(t2.a) from t1 inner join t2 on t1.a=t2.a;
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a float, b double, index idx_a(a));
+create table t2(a double not null, b double, index idx_a(a));
+insert into t1 values(1, 1);
+insert into t2 values(1, 1);
+select t1.a, t2.a from t1 inner join t2 on t1.a=t2.a;
+drop table if exists t1;
+drop table if exists t2;
+create table t1(a bigint signed, b bigint, index idx_a(a));
+create table t2(a bigint unsigned, b bigint, index idx_a(a));
+insert into t1 values(-1, 0), (-1, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);
+insert into t2 values(18446744073709551615, 0), (18446744073709551615, 0), (0, 0), (0, 0), (pow(2, 63), 0), (pow(2, 63), 0);
+select t1.a, t2.a from t1 join t2 on t1.a=t2.a order by t1.a;
+set @@session.tidb_executor_concurrency = default;
+set @@session.tidb_hash_join_concurrency = default;
+set @@session.tidb_distsql_scan_concurrency = default;
+
+# TestMergeJoinWithOtherConditions
+drop table if exists R;
+drop table if exists Y;
+create table Y (a int primary key, b int, index id_b(b));
+insert into Y values (0,2),(2,2);
+create table R (a int primary key, b int);
+insert into R values (2,2);
+# the max() limits the required rows to at most one
+# TODO(fangzhuhe): specify Y as the build side using hints
+select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;
+
+# TestShuffleMergeJoinWithOtherConditions
+set @@session.tidb_merge_join_concurrency = 4;
+drop table if exists R;
+drop table if exists Y;
+create table Y (a int primary key, b int, index id_b(b));
+insert into Y values (0,2),(2,2);
+create table R (a int primary key, b int);
+insert into R values (2,2);
+# the max() limits the required rows to at most one
+# TODO(fangzhuhe): specify Y as the build side using hints
+select /*+tidb_smj(R)*/ max(Y.a) from R join Y on R.a=Y.b where R.b <= Y.a;
+set @@session.tidb_merge_join_concurrency = default;
+
diff --git a/tests/integrationtest/t/executor/parallel_apply.test b/tests/integrationtest/t/executor/parallel_apply.test
new file mode 100644
index 0000000000000..5cb79755f4397
--- /dev/null
+++ b/tests/integrationtest/t/executor/parallel_apply.test
@@ -0,0 +1,140 @@
+# TestSetTiDBEnableParallelApply
+set tidb_enable_parallel_apply=0;
+select @@tidb_enable_parallel_apply;
+set tidb_enable_parallel_apply=1;
+select @@tidb_enable_parallel_apply;
+set tidb_enable_parallel_apply=on;
+select @@tidb_enable_parallel_apply;
+set tidb_enable_parallel_apply=off;
+select @@tidb_enable_parallel_apply;
+-- error 1231
+set
tidb_enable_parallel_apply=-1; +-- error 1231 +set tidb_enable_parallel_apply=2; +-- error 1231 +set tidb_enable_parallel_apply=1000; +-- error 1231 +set tidb_enable_parallel_apply='onnn'; +set tidb_enable_parallel_apply=default; + +# TestApplyWithOtherFeatures +set tidb_enable_parallel_apply=true; +drop table if exists t, t1; +create table t(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +create table t1(a varchar(255) CHARACTER SET utf8mb4 COLLATE utf8mb4_general_ci, b int); +insert into t values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +insert into t1 values ('a', 1), ('A', 2), ('a', 3), ('A', 4); +--sorted_result +select (select min(t1.b) from t1 where t1.a >= t.a), (select sum(t1.b) from t1 where t1.a >= t.a) from t; +--sorted_result +select (select min(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b), (select sum(t1.b) from t1 where t1.a >= t.a and t1.b >= t.b) from t; + +## plan cache +set tidb_enable_prepared_plan_cache=1; +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 1), (1, 5), (2, 3), (2, 4), (3, 3); +insert into t2 values (0, 1), (2, -1), (3, 2); +prepare stmt from "select * from t1 where t1.b >= (select sum(t2.b) from t2 where t2.a > t1.a and t2.a > ?)"; +set @a=1; +--sorted_result +execute stmt using @a; +set @a=2; +--sorted_result +execute stmt using @a; +select @@last_plan_from_cache; + +## cluster index +set tidb_enable_clustered_index=ON; +drop table if exists t, t2; +create table t(a int, b int, c int, primary key(a, b)); +create table t2(a int, b int, c int, primary key(a, c)); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4); +insert into t2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4); +--sorted_result +select * from t where (select min(t2.b) from t2 where t2.a > t.a) > 0; +set tidb_enable_clustered_index=INT_ONLY; + +## partitioning table +drop table if exists t1, t2; +create table t1(a int, b int) partition by range(a) (partition p0 values less than(10), partition p1 values less than(20), partition p2 values less than(30), partition p3 values less than(40)); +create table t2(a int, b int) partition by hash(a) partitions 4; +insert into t1 values (5, 5), (15, 15), (25, 25), (35, 35); +insert into t2 values (5, 5), (15, 15), (25, 25), (35, 35); +--sorted_result +select (select count(*) from t2 where t2.a > t1.b and t2.a=20), (select max(t2.b) from t2 where t2.a between t1.a and 20) from t1 where t1.a > 10; + +set tidb_enable_parallel_apply=default; +set tidb_enable_prepared_plan_cache=default; +set tidb_enable_clustered_index=default; + +# TestApplyInDML +set tidb_enable_parallel_apply=true; + +## delete +drop table if exists t, t2; +create table t(a bigint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values (1, 1), (2, 2), (3, 3), (4, 4), (1, 1), (2, 2), (3, 3), (4, 4); +delete from t where (select min(t2.a) * 2 from t2 where t2.a < t.a) > 1; +--sorted_result +select * from t; + +## insert +drop table if exists t; +create table t(a int, b int, c int); +insert into t values (1, 1, 1), (2, 2, 2), (3, 3, 3), (1, 1, 1), (2, 2, 2), (3, 3, 3); +insert into t (select * from t where (select count(*) from t t1 where t1.b > t.a) > 2); +--sorted_result +select * from t; + +## update +drop table if exists t, t2; +create table t(a smallint, b int); +create table t2(a int, b int); +insert into t values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +insert into t2 
values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +update t set a = a + 1 where (select count(*) from t2 where t2.a <= t.a) in (1, 2); +--sorted_result +select * from t; + +## replace +drop table if exists t, t2; +create table t(a tinyint, b int, unique index idx(a)); +create table t2(a tinyint, b int); +insert into t values (1, 1), (2, 2), (3, 3), (4, 4); +insert into t2 values (1, 1), (2, 2), (3, 3), (1, 1), (2, 2), (3, 3); +replace into t (select pow(t2.a, 2), t2.b from t2 where (select min(t.a) from t where t.a > t2.a) between 1 and 5); +--sorted_result +select * from t; + +## transaction +drop table if exists t1, t2; +create table t1(a int, b int); +create table t2(a int, b int); +insert into t1 values (1, 2), (1, 3); +begin; +insert into t1 values (1, 4), (2, 3), (2, 5); +insert into t2 values (2, 3), (3, 4); +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +delete from t1 where a = 1; +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +commit; +--sorted_result +select * from t1 where t1.b > any (select t2.b from t2 where t2.b < t1.b); +set tidb_enable_parallel_apply=default; + +# TestIssue24930 +set tidb_enable_parallel_apply=true; +drop table if exists t1, t2; +create table t1(a int); +create table t2(a int); +select case when t1.a is null + then (select t2.a from t2 where t2.a = t1.a limit 1) else t1.a end a + from t1 where t1.a=1 order by a limit 1; +set tidb_enable_parallel_apply=default; + diff --git a/tests/integrationtest/t/executor/partition/issues.test b/tests/integrationtest/t/executor/partition/issues.test new file mode 100644 index 0000000000000..5ffd753115768 --- /dev/null +++ b/tests/integrationtest/t/executor/partition/issues.test @@ -0,0 +1,255 @@ +# TestIssue25527 +drop table if exists t, t0, t1, t2; +set @@tidb_partition_prune_mode = 'dynamic'; +set @@session.tidb_enable_list_partition = ON; +CREATE TABLE t ( + col1 tinyint(4) primary key +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( COL1 DIV 80 ) +PARTITIONS 6; +insert into t values(-128), (107); +prepare stmt from 'select col1 from t where col1 in (?, ?, ?)'; +set @a=-128, @b=107, @c=-128; +--sorted_result +execute stmt using @a,@b,@c; +CREATE TABLE t0 (a int primary key) PARTITION BY HASH( a DIV 80 ) PARTITIONS 2; +insert into t0 values (1); +select a from t0 where a in (1); +create table t1 (a int primary key) partition by range (a+5) ( + partition p0 values less than(10), partition p1 values less than(20)); +insert into t1 values (5); +select a from t1 where a in (5); +create table t2 (a int primary key) partition by list (a+5) ( + partition p0 values in (5, 6, 7, 8), partition p1 values in (9, 10, 11, 12)); +insert into t2 values (5); +select a from t2 where a in (5); +set @@tidb_partition_prune_mode = default; +set @@session.tidb_enable_list_partition = default; + +# TestIssue25598 +drop table if exists UK_HP16726; +CREATE TABLE UK_HP16726 ( + COL1 bigint(16) DEFAULT NULL, + COL2 varchar(20) DEFAULT NULL, + COL4 datetime DEFAULT NULL, + COL3 bigint(20) DEFAULT NULL, + COL5 float DEFAULT NULL, + UNIQUE KEY UK_COL1 (COL1) /*!80000 INVISIBLE */ + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin + PARTITION BY HASH( COL1 ) + PARTITIONS 25; +select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +explain format='brief' select t1. 
col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +set @@tidb_partition_prune_mode = 'dynamic'; +analyze table UK_HP16726; +select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +explain format='brief' select t1. col1, t2. col1 from UK_HP16726 as t1 inner join UK_HP16726 as t2 on t1.col1 = t2.col1 where t1.col1 > -9223372036854775808 group by t1.col1, t2.col1 having t1.col1 != 9223372036854775807; +set @@tidb_partition_prune_mode = default; + +# TestIssue25253 +drop table if exists IDT_HP23902, t; +CREATE TABLE IDT_HP23902 ( + COL1 smallint DEFAULT NULL, + COL2 varchar(20) DEFAULT NULL, + COL4 datetime DEFAULT NULL, + COL3 bigint DEFAULT NULL, + COL5 float DEFAULT NULL, + KEY UK_COL1 (COL1) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY HASH( COL1+30 ) +PARTITIONS 6; +insert ignore into IDT_HP23902 partition(p0, p1)(col1, col3) values(-10355, 1930590137900568573), (13810, -1332233145730692137); +show warnings; +select * from IDT_HP23902; +create table t ( + a int +) partition by range(a) ( + partition p0 values less than (10), + partition p1 values less than (20)); +insert ignore into t partition(p0)(a) values(12); +show warnings; +select * from t; + +# TestIssue25030 +drop table if exists tbl_936; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_936 ( + col_5410 smallint NOT NULL, + col_5411 double, + col_5412 boolean NOT NULL DEFAULT 1, + col_5413 set('Alice', 'Bob', 'Charlie', 'David') NOT NULL DEFAULT 'Charlie', + col_5414 varbinary(147) COLLATE 'binary' DEFAULT 'bvpKgYWLfyuTiOYSkj', + col_5415 timestamp NOT NULL DEFAULT '2021-07-06', + col_5416 decimal(6, 6) DEFAULT 0.49, + col_5417 text COLLATE utf8_bin, + col_5418 float DEFAULT 2048.0762299371554, + col_5419 int UNSIGNED NOT NULL DEFAULT 3152326370, + PRIMARY KEY (col_5419) ) + PARTITION BY HASH (col_5419) PARTITIONS 3; +SELECT last_value(col_5414) OVER w FROM tbl_936 + WINDOW w AS (ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419) + ORDER BY col_5410, col_5411, col_5412, col_5413, col_5414, col_5415, col_5416, col_5417, col_5418, col_5419, nth_value(col_5412, 5) OVER w; +set @@tidb_partition_prune_mode = default; + +# TestIssue24636 +drop table if exists t; +CREATE TABLE t (a int, b date, c int, PRIMARY KEY (a,b)) +PARTITION BY RANGE ( TO_DAYS(b) ) ( + PARTITION p0 VALUES LESS THAN (737821), + PARTITION p1 VALUES LESS THAN (738289) +); +INSERT INTO t (a, b, c) VALUES(0, '2021-05-05', 0); +select c from t use index(primary) where a=0 limit 1; +CREATE TABLE test_partition ( + a varchar(100) NOT NULL, + b date NOT NULL, + c varchar(100) NOT NULL, + d datetime DEFAULT NULL, + e datetime DEFAULT NULL, + f bigint(20) DEFAULT NULL, + g bigint(20) DEFAULT NULL, + h bigint(20) DEFAULT NULL, + i bigint(20) DEFAULT NULL, + j bigint(20) DEFAULT NULL, + k bigint(20) DEFAULT NULL, + l bigint(20) DEFAULT NULL, + PRIMARY KEY (a,b,c) /*T![clustered_index] NONCLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin +PARTITION BY RANGE ( TO_DAYS(b) ) ( + PARTITION pmin VALUES LESS THAN (737821), + PARTITION p20200601 VALUES LESS THAN (738289)); +INSERT INTO test_partition (a, b, c, d, e, f, g, h, i, j, k, l) VALUES('aaa', '2021-05-05', 
'428ff6a1-bb37-42ac-9883-33d7a29961e6', '2021-05-06 08:13:38', '2021-05-06 13:28:08', 0, 8, 3, 0, 9, 1, 0); +select c,j,l from test_partition where c='428ff6a1-bb37-42ac-9883-33d7a29961e6' and a='aaa' limit 0, 200; + +# TestIssue25309 +drop table if exists tbl_500, tbl_600; +set @@tidb_partition_prune_mode = 'dynamic'; +CREATE TABLE tbl_500 ( + col_20 tinyint(4) NOT NULL, + col_21 varchar(399) CHARACTER SET utf8 COLLATE utf8_unicode_ci DEFAULT NULL, + col_22 json DEFAULT NULL, + col_23 blob DEFAULT NULL, + col_24 mediumint(9) NOT NULL, + col_25 float NOT NULL DEFAULT '7306.384497585912', + col_26 binary(196) NOT NULL, + col_27 timestamp DEFAULT '1976-12-08 00:00:00', + col_28 bigint(20) NOT NULL, + col_29 tinyint(1) NOT NULL DEFAULT '1', + PRIMARY KEY (col_29,col_20) /*T![clustered_index] NONCLUSTERED */, + KEY idx_7 (col_28,col_20,col_26,col_27,col_21,col_24), + KEY idx_8 (col_25,col_29,col_24) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +CREATE TABLE tbl_600 ( + col_60 int(11) NOT NULL DEFAULT '-776833487', + col_61 tinyint(1) NOT NULL DEFAULT '1', + col_62 tinyint(4) NOT NULL DEFAULT '-125', + PRIMARY KEY (col_62,col_60,col_61) /*T![clustered_index] NONCLUSTERED */, + KEY idx_19 (col_60) + ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci + PARTITION BY HASH( col_60 ) + PARTITIONS 1; +insert into tbl_500 select -34, 'lrfGPPPUuZjtT', '{"obj1": {"sub_obj0": 100}}', 0x6C47636D, 1325624, 7306.3843, 'abc', '1976-12-08', 4757891479624162031, 0; +select tbl_5.* from tbl_500 tbl_5 where col_24 in ( select col_62 from tbl_600 where tbl_5.col_26 < 'hSvHLdQeGBNIyOFXStV' ); +set @@tidb_partition_prune_mode = default; + +# TestIssue20028 +drop table if exists t1, t2; +set @@tidb_partition_prune_mode='static-only'; +create table t1 (c_datetime datetime, primary key (c_datetime)) +partition by range (to_days(c_datetime)) ( partition p0 values less than (to_days('2020-02-01')), +partition p1 values less than (to_days('2020-04-01')), +partition p2 values less than (to_days('2020-06-01')), +partition p3 values less than maxvalue); +create table t2 (c_datetime datetime, unique key(c_datetime)); +insert into t1 values ('2020-06-26 03:24:00'), ('2020-02-21 07:15:33'), ('2020-04-27 13:50:58'); +insert into t2 values ('2020-01-10 09:36:00'), ('2020-02-04 06:00:00'), ('2020-06-12 03:45:18'); +begin; +--sorted_result +select * from t1 join t2 on t1.c_datetime >= t2.c_datetime for update; +rollback; +set @@tidb_partition_prune_mode = default; + +# TestIssue21731 +drop table if exists p, t; +set @@tidb_enable_list_partition = OFF; +create table t (a int, b int, unique index idx(a)) partition by list columns(b) (partition p0 values in (1), partition p1 values in (2)); +set @@tidb_enable_list_partition = default; + +# TestIssue25528 +drop table if exists issue25528; +set @@tidb_partition_prune_mode = 'static'; +create table issue25528 (id int primary key, balance DECIMAL(10, 2), balance2 DECIMAL(10, 2) GENERATED ALWAYS AS (-balance) VIRTUAL, created_at TIMESTAMP) PARTITION BY HASH(id) PARTITIONS 8; +insert into issue25528 (id, balance, created_at) values(1, 100, '2021-06-17 22:35:20'); +begin pessimistic; +select * from issue25528 where id = 1 for update; +drop table if exists issue25528; +CREATE TABLE `issue25528` ( `c1` int(11) NOT NULL, `c2` int(11) DEFAULT NULL, `c3` int(11) DEFAULT NULL, `c4` int(11) DEFAULT NULL, PRIMARY KEY (`c1`) /*T![clustered_index] CLUSTERED */, KEY `k2` (`c2`), KEY `k3` (`c3`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin PARTITION BY HASH( 
`c1` ) PARTITIONS 10; +INSERT INTO issue25528 (`c1`, `c2`, `c3`, `c4`) VALUES (1, 1, 1, 1) , (3, 3, 3, 3) , (2, 2, 2, 2) , (4, 4, 4, 4); +select * from issue25528 where c1 in (3, 4) order by c2 for update; +rollback; +set @@tidb_partition_prune_mode = default; + +# TestIssue27346 +set @@tidb_enable_index_merge=1,@@tidb_partition_prune_mode='dynamic'; +DROP TABLE IF EXISTS `tbl_18`; +CREATE TABLE `tbl_18` (`col_119` binary(16) NOT NULL DEFAULT 'skPoKiwYUi',`col_120` int(10) unsigned NOT NULL,`col_121` timestamp NOT NULL,`col_122` double NOT NULL DEFAULT '3937.1887880628115',`col_123` bigint(20) NOT NULL DEFAULT '3550098074891542725',PRIMARY KEY (`col_123`,`col_121`,`col_122`,`col_120`) CLUSTERED,UNIQUE KEY `idx_103` (`col_123`,`col_119`,`col_120`),UNIQUE KEY `idx_104` (`col_122`,`col_120`),UNIQUE KEY `idx_105` (`col_119`,`col_120`),KEY `idx_106` (`col_121`,`col_120`,`col_122`,`col_119`),KEY `idx_107` (`col_121`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci PARTITION BY HASH( `col_120` ) PARTITIONS 3; +INSERT INTO tbl_18 (`col_119`, `col_120`, `col_121`, `col_122`, `col_123`) VALUES (X'736b506f4b6977595569000000000000', 672436701, '1974-02-24 00:00:00', 3937.1887880628115e0, -7373106839136381229), (X'736b506f4b6977595569000000000000', 2637316689, '1993-10-29 00:00:00', 3937.1887880628115e0, -4522626077860026631), (X'736b506f4b6977595569000000000000', 831809724, '1995-11-20 00:00:00', 3937.1887880628115e0, -4426441253940231780), (X'736b506f4b6977595569000000000000', 1588592628, '2001-03-28 00:00:00', 3937.1887880628115e0, 1329207475772244999), (X'736b506f4b6977595569000000000000', 3908038471, '2031-06-06 00:00:00', 3937.1887880628115e0, -6562815696723135786), (X'736b506f4b6977595569000000000000', 1674237178, '2001-10-24 00:00:00', 3937.1887880628115e0, -6459065549188938772), (X'736b506f4b6977595569000000000000', 3507075493, '2010-03-25 00:00:00', 3937.1887880628115e0, -4329597025765326929), (X'736b506f4b6977595569000000000000', 1276461709, '2019-07-20 00:00:00', 3937.1887880628115e0, 3550098074891542725); +--sorted_result +select col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +--sorted_result +select /*+ use_index_merge( tbl_18 ) */ col_120,col_122,col_123 from tbl_18 where tbl_18.col_122 = 4763.320888074281 and not( tbl_18.col_121 in ( '2032-11-01' , '1975-05-21' , '1994-05-16' , '1984-01-15' ) ) or not( tbl_18.col_121 >= '2008-10-24' ) order by tbl_18.col_119,tbl_18.col_120,tbl_18.col_121,tbl_18.col_122,tbl_18.col_123 limit 919 for update; +set @@tidb_enable_index_merge=default,@@tidb_partition_prune_mode=default; + +# TestIssue35181 +drop table if exists t; +CREATE TABLE `t` (`a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL) PARTITION BY RANGE (`a`) (PARTITION `p0` VALUES LESS THAN (2021), PARTITION `p1` VALUES LESS THAN (3000)); +set @@tidb_partition_prune_mode = 'static'; +insert into t select * from t where a=3000; +set @@tidb_partition_prune_mode = 'dynamic'; +insert into t select * from t where a=3000; +set @@tidb_partition_prune_mode = default; + +# TestIssue39999 +set @@tidb_opt_advanced_join_hint=0; +drop table if exists c, t; +CREATE TABLE `c` (`serial_id` varchar(24),`occur_trade_date` date,`txt_account_id` varchar(24),`capital_sub_class` varchar(10),`occur_amount` decimal(16,2),`broker` 
varchar(10),PRIMARY KEY (`txt_account_id`,`occur_trade_date`,`serial_id`) /*T![clustered_index] CLUSTERED */,KEY `idx_serial_id` (`serial_id`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci PARTITION BY RANGE COLUMNS(`serial_id`) (PARTITION `p202209` VALUES LESS THAN ('20221001'),PARTITION `p202210` VALUES LESS THAN ('20221101'),PARTITION `p202211` VALUES LESS THAN ('20221201')); +CREATE TABLE `t` ( `txn_account_id` varchar(24), `account_id` varchar(32), `broker` varchar(10), PRIMARY KEY (`txn_account_id`) /*T![clustered_index] CLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_unicode_ci; +INSERT INTO `c` (serial_id, txt_account_id, capital_sub_class, occur_trade_date, occur_amount, broker) VALUES ('2022111700196920','04482786','CUST','2022-11-17',-2.01,'0009'); +INSERT INTO `t` VALUES ('04482786','1142927','0009'); +set tidb_partition_prune_mode='dynamic'; +analyze table c; +analyze table t; +explain select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17'; +select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17'; +alter table t add column serial_id varchar(24) default '2022111700196920'; +select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +explain select + /*+ inl_join(c) */ + c.occur_amount +from + c + join t on c.txt_account_id = t.txn_account_id + and t.broker = '0009' + and c.occur_trade_date = '2022-11-17' and c.serial_id = t.serial_id; +set @@tidb_opt_advanced_join_hint=default; +set tidb_partition_prune_mode=default; diff --git a/tests/integrationtest/t/executor/partition/partition_boundaries.test b/tests/integrationtest/t/executor/partition/partition_boundaries.test new file mode 100644 index 0000000000000..7194aac64c3ab --- /dev/null +++ b/tests/integrationtest/t/executor/partition/partition_boundaries.test @@ -0,0 +1,1577 @@ +# TestRangePartitionBoundariesEq +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a = -2147483648; +--sorted_result +SELECT * FROM t WHERE a = -2147483648; +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483648); +explain format='brief' SELECT * FROM t WHERE a = 0; +--sorted_result +SELECT * FROM t WHERE a = 0; +explain format='brief' SELECT * FROM t WHERE a IN (0); +--sorted_result +SELECT * FROM t WHERE a IN (0); +explain format='brief' SELECT * FROM t WHERE a = 999998; +--sorted_result +SELECT * 
FROM t WHERE a = 999998; +explain format='brief' SELECT * FROM t WHERE a IN (999998); +--sorted_result +SELECT * FROM t WHERE a IN (999998); +explain format='brief' SELECT * FROM t WHERE a = 999999; +--sorted_result +SELECT * FROM t WHERE a = 999999; +explain format='brief' SELECT * FROM t WHERE a IN (999999); +--sorted_result +SELECT * FROM t WHERE a IN (999999); +explain format='brief' SELECT * FROM t WHERE a = 1000000; +--sorted_result +SELECT * FROM t WHERE a = 1000000; +explain format='brief' SELECT * FROM t WHERE a IN (1000000); +--sorted_result +SELECT * FROM t WHERE a IN (1000000); +explain format='brief' SELECT * FROM t WHERE a = 1000001; +--sorted_result +SELECT * FROM t WHERE a = 1000001; +explain format='brief' SELECT * FROM t WHERE a IN (1000001); +--sorted_result +SELECT * FROM t WHERE a IN (1000001); +explain format='brief' SELECT * FROM t WHERE a = 1000002; +--sorted_result +SELECT * FROM t WHERE a = 1000002; +explain format='brief' SELECT * FROM t WHERE a IN (1000002); +--sorted_result +SELECT * FROM t WHERE a IN (1000002); +explain format='brief' SELECT * FROM t WHERE a = 3000000; +--sorted_result +SELECT * FROM t WHERE a = 3000000; +explain format='brief' SELECT * FROM t WHERE a IN (3000000); +--sorted_result +SELECT * FROM t WHERE a IN (3000000); +explain format='brief' SELECT * FROM t WHERE a = 3000001; +--sorted_result +SELECT * FROM t WHERE a = 3000001; +explain format='brief' SELECT * FROM t WHERE a IN (3000001); +--sorted_result +SELECT * FROM t WHERE a IN (3000001); +explain format='brief' SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483648, -2147483647); +explain format='brief' SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +--sorted_result +SELECT * FROM t WHERE a IN (-2147483647, -2147483646); +explain format='brief' SELECT * FROM t WHERE a IN (999997, 999998, 999999); +--sorted_result +SELECT * FROM t WHERE a IN (999997, 999998, 999999); +explain format='brief' SELECT * FROM t WHERE a IN (999998, 999999, 1000000); +--sorted_result +SELECT * FROM t WHERE a IN (999998, 999999, 1000000); +explain format='brief' SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +--sorted_result +SELECT * FROM t WHERE a IN (999999, 1000000, 1000001); +explain format='brief' SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +--sorted_result +SELECT * FROM t WHERE a IN (1000000, 1000001, 1000002); +explain format='brief' SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +--sorted_result +SELECT * FROM t WHERE a IN (1999997, 1999998, 1999999); +explain format='brief' SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +--sorted_result +SELECT * FROM t WHERE a IN (1999998, 1999999, 2000000); +explain format='brief' SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +--sorted_result +SELECT * FROM t WHERE a IN (1999999, 2000000, 2000001); +explain format='brief' SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +--sorted_result +SELECT * FROM t WHERE a IN (2000000, 2000001, 2000002); +explain format='brief' SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +--sorted_result +SELECT * FROM t WHERE a IN (2999997, 2999998, 2999999); +explain format='brief' SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +--sorted_result +SELECT * FROM t WHERE a IN (2999998, 2999999, 3000000); +explain format='brief' SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +--sorted_result +SELECT * FROM t WHERE a IN (2999999, 3000000, 3000001); +explain format='brief' SELECT * FROM t WHERE a IN 
(3000000, 3000001, 3000002); +--sorted_result +SELECT * FROM t WHERE a IN (3000000, 3000001, 3000002); +SET @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesNe +SET @@tidb_partition_prune_mode = 'dynamic'; +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a != -1; +--sorted_result +SELECT * FROM t WHERE a != -1; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1; +explain format='brief' SELECT * FROM t WHERE a != 0; +--sorted_result +SELECT * FROM t WHERE a != 0; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0; +explain format='brief' SELECT * FROM t WHERE a != 1; +--sorted_result +SELECT * FROM t WHERE a != 1; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1; +explain format='brief' SELECT * FROM t WHERE a != 2; +--sorted_result +SELECT * FROM t WHERE a != 2; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2; +explain format='brief' SELECT * FROM t WHERE a != 3; +--sorted_result +SELECT * FROM t WHERE a != 3; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3); +explain format='brief' SELECT * FROM t 
WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3; +explain format='brief' SELECT * FROM t WHERE a != 4; +--sorted_result +SELECT * FROM t WHERE a != 4; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4; +explain format='brief' SELECT * FROM t WHERE a != 5; +--sorted_result +SELECT * FROM t WHERE a != 5; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5; +explain format='brief' SELECT * FROM t WHERE a != 6; +--sorted_result +SELECT * FROM t WHERE a != 6; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6; +explain format='brief' SELECT * FROM t WHERE a != 7; +--sorted_result +SELECT * FROM t WHERE a != 7; +explain format='brief' SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +--sorted_result +SELECT * FROM t WHERE 1 = 1 AND a != -1 AND a != 0 AND a != 1 AND a != 2 AND a != 3 AND a != 4 AND a != 5 AND a != 6 AND a != 7; +explain format='brief' SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +--sorted_result +SELECT * FROM t WHERE a NOT IN (-2, -1, 0, 1, 2, 3, 4, 5, 6, 7); +explain format='brief' SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +--sorted_result +SELECT * FROM t WHERE 1 = 0 OR a = -1 OR a = 0 OR a = 1 OR a = 2 OR a = 3 OR a = 4 OR a = 5 OR a = 6 OR a = 7; +SET @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesBetweenM +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t 
VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483649; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483648; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483647; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483646; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2147483638; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483650; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483649; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483648; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483647; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -2147483648 AND -2146483646; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND -1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND -1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 0; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 0; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 2; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 2; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 10; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 10; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; 
+--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1000008; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999996; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999998 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1000009; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999997; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 999999 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1000010; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 
1000000 AND 1000010; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000000 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1000011; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 1999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000001 AND 2000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000004; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 1000012; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 
1000002 AND 2000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1000002 AND 2000004; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 2999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3000010; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999998; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 3999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000000 AND 4000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000003; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3000011; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 3999999; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000000; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000001; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000002; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3000001 AND 4000003; + +# TestRangePartitionBoundariesBetweenS +DROP TABLE IF EXISTS t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 
Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND -1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND -1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN -1 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN -1 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 0; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 0; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 0 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 0 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 1; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 1; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 1 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 1 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 2; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 2; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 3; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 3; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 3 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 3 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 4 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 4 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 5; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 5; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 5 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 5 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 6; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 6; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 6 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 6 AND 4; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 2 AND 7; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 2 AND 7; +explain format='brief' SELECT * FROM t WHERE a BETWEEN 7 AND 4; +--sorted_result +SELECT * FROM t WHERE a BETWEEN 7 AND 4; + +# TestRangePartitionBoundariesLtM +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1000000), + PARTITION p1 VALUES LESS THAN (2000000), + PARTITION p2 VALUES LESS THAN (3000000)); +INSERT INTO t VALUES (999998, '999998 Filler ...'), (999999, '999999 Filler ...'), (1000000, '1000000 Filler ...'), (1000001, '1000001 Filler ...'), (1000002, '1000002 Filler ...'); +INSERT INTO t VALUES (1999998, '1999998 Filler ...'), (1999999, '1999999 Filler ...'), (2000000, '2000000 Filler ...'), (2000001, '2000001 Filler ...'), (2000002, '2000002 Filler ...'); +INSERT INTO t VALUES (2999998, '2999998 Filler ...'), (2999999, '2999999 Filler ...'); +INSERT INTO t VALUES (-2147483648, 'MIN_INT filler...'), (0, '0 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -2147483648; +--sorted_result +SELECT * FROM t WHERE a < -2147483648; +explain format='brief' SELECT * FROM t WHERE a > 
-2147483648; +--sorted_result +SELECT * FROM t WHERE a > -2147483648; +explain format='brief' SELECT * FROM t WHERE a <= -2147483648; +--sorted_result +SELECT * FROM t WHERE a <= -2147483648; +explain format='brief' SELECT * FROM t WHERE a >= -2147483648; +--sorted_result +SELECT * FROM t WHERE a >= -2147483648; +explain format='brief' SELECT * FROM t WHERE a < 0; +--sorted_result +SELECT * FROM t WHERE a < 0; +explain format='brief' SELECT * FROM t WHERE a > 0; +--sorted_result +SELECT * FROM t WHERE a > 0; +explain format='brief' SELECT * FROM t WHERE a <= 0; +--sorted_result +SELECT * FROM t WHERE a <= 0; +explain format='brief' SELECT * FROM t WHERE a >= 0; +--sorted_result +SELECT * FROM t WHERE a >= 0; +explain format='brief' SELECT * FROM t WHERE a < 999998; +--sorted_result +SELECT * FROM t WHERE a < 999998; +explain format='brief' SELECT * FROM t WHERE a > 999998; +--sorted_result +SELECT * FROM t WHERE a > 999998; +explain format='brief' SELECT * FROM t WHERE a <= 999998; +--sorted_result +SELECT * FROM t WHERE a <= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998; +--sorted_result +SELECT * FROM t WHERE a >= 999998; +explain format='brief' SELECT * FROM t WHERE a < 999999; +--sorted_result +SELECT * FROM t WHERE a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999999; +--sorted_result +SELECT * FROM t WHERE a > 999999; +explain format='brief' SELECT * FROM t WHERE a <= 999999; +--sorted_result +SELECT * FROM t WHERE a <= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999; +--sorted_result +SELECT * FROM t WHERE a >= 999999; +explain format='brief' SELECT * FROM t WHERE a < 1000000; +--sorted_result +SELECT * FROM t WHERE a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 1000000; +--sorted_result +SELECT * FROM t WHERE a > 1000000; +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 1000000; +explain format='brief' SELECT * FROM t WHERE a < 1000001; +--sorted_result +SELECT * FROM t WHERE a < 1000001; +explain format='brief' SELECT * FROM t WHERE a > 1000001; +--sorted_result +SELECT * FROM t WHERE a > 1000001; +explain format='brief' SELECT * FROM t WHERE a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a >= 1000001; +--sorted_result +SELECT * FROM t WHERE a >= 1000001; +explain format='brief' SELECT * FROM t WHERE a < 1000002; +--sorted_result +SELECT * FROM t WHERE a < 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000002; +explain format='brief' SELECT * FROM t WHERE a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a >= 1000002; +--sorted_result +SELECT * FROM t WHERE a >= 1000002; +explain format='brief' SELECT * FROM t WHERE a < 3000000; +--sorted_result +SELECT * FROM t WHERE a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 3000000; +--sorted_result +SELECT * FROM t WHERE a > 3000000; +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a >= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 3000000; +explain format='brief' SELECT * FROM t WHERE a < 3000001; +--sorted_result +SELECT * FROM t WHERE a < 3000001; +explain format='brief' 
SELECT * FROM t WHERE a > 3000001; +--sorted_result +SELECT * FROM t WHERE a > 3000001; +explain format='brief' SELECT * FROM t WHERE a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a >= 3000001; +--sorted_result +SELECT * FROM t WHERE a >= 3000001; +explain format='brief' SELECT * FROM t WHERE a < 999997; +--sorted_result +SELECT * FROM t WHERE a < 999997; +explain format='brief' SELECT * FROM t WHERE a > 999997; +--sorted_result +SELECT * FROM t WHERE a > 999997; +explain format='brief' SELECT * FROM t WHERE a <= 999997; +--sorted_result +SELECT * FROM t WHERE a <= 999997; +explain format='brief' SELECT * FROM t WHERE a >= 999997; +--sorted_result +SELECT * FROM t WHERE a >= 999997; +explain format='brief' SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a >= 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a < 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999997 AND a <= 999999; +--sorted_result +SELECT * FROM t WHERE a > 999997 AND a <= 999999; +explain format='brief' SELECT * FROM t WHERE a < 999998; +--sorted_result +SELECT * FROM t WHERE a < 999998; +explain format='brief' SELECT * FROM t WHERE a > 999998; +--sorted_result +SELECT * FROM t WHERE a > 999998; +explain format='brief' SELECT * FROM t WHERE a <= 999998; +--sorted_result +SELECT * FROM t WHERE a <= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998; +--sorted_result +SELECT * FROM t WHERE a >= 999998; +explain format='brief' SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a < 1000000; +--sorted_result +SELECT * FROM t WHERE a > 999998 AND a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a > 999998 AND a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a < 999999; +--sorted_result +SELECT * FROM t WHERE a < 999999; +explain format='brief' SELECT * FROM t WHERE a > 999999; +--sorted_result +SELECT * FROM t WHERE a > 999999; +explain format='brief' SELECT * FROM t WHERE a <= 999999; +--sorted_result +SELECT * FROM t WHERE a <= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999; +--sorted_result +SELECT * FROM t WHERE a >= 999999; +explain format='brief' SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a >= 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a < 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a < 1000001; +explain format='brief' SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +--sorted_result +SELECT * FROM t WHERE a > 999999 AND a <= 1000001; +explain format='brief' SELECT * FROM t WHERE a < 1000000; +--sorted_result +SELECT * FROM t WHERE a < 1000000; +explain format='brief' SELECT * FROM t WHERE a > 1000000; 
+--sorted_result +SELECT * FROM t WHERE a > 1000000; +explain format='brief' SELECT * FROM t WHERE a <= 1000000; +--sorted_result +SELECT * FROM t WHERE a <= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000; +--sorted_result +SELECT * FROM t WHERE a >= 1000000; +explain format='brief' SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a >= 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a < 1000002; +explain format='brief' SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +--sorted_result +SELECT * FROM t WHERE a > 1000000 AND a <= 1000002; +explain format='brief' SELECT * FROM t WHERE a < 1999997; +--sorted_result +SELECT * FROM t WHERE a < 1999997; +explain format='brief' SELECT * FROM t WHERE a > 1999997; +--sorted_result +SELECT * FROM t WHERE a > 1999997; +explain format='brief' SELECT * FROM t WHERE a <= 1999997; +--sorted_result +SELECT * FROM t WHERE a <= 1999997; +explain format='brief' SELECT * FROM t WHERE a >= 1999997; +--sorted_result +SELECT * FROM t WHERE a >= 1999997; +explain format='brief' SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a >= 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a < 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999997 AND a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a < 1999998; +--sorted_result +SELECT * FROM t WHERE a < 1999998; +explain format='brief' SELECT * FROM t WHERE a > 1999998; +--sorted_result +SELECT * FROM t WHERE a > 1999998; +explain format='brief' SELECT * FROM t WHERE a <= 1999998; +--sorted_result +SELECT * FROM t WHERE a <= 1999998; +explain format='brief' SELECT * FROM t WHERE a >= 1999998; +--sorted_result +SELECT * FROM t WHERE a >= 1999998; +explain format='brief' SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a >= 1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a < 2000000; +explain format='brief' SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a > 1999998 AND a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a < 1999999; +--sorted_result +SELECT * FROM t WHERE a < 1999999; +explain format='brief' SELECT * FROM t WHERE a > 1999999; +--sorted_result +SELECT * FROM t WHERE a > 1999999; +explain format='brief' SELECT * FROM t WHERE a <= 1999999; +--sorted_result +SELECT * FROM t WHERE a <= 1999999; +explain format='brief' SELECT * FROM t WHERE a >= 1999999; +--sorted_result +SELECT * FROM t WHERE a >= 1999999; +explain format='brief' SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a >= 1999999 AND a <= 2000001; +explain 
format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a < 2000001; +explain format='brief' SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +--sorted_result +SELECT * FROM t WHERE a > 1999999 AND a <= 2000001; +explain format='brief' SELECT * FROM t WHERE a < 2000000; +--sorted_result +SELECT * FROM t WHERE a < 2000000; +explain format='brief' SELECT * FROM t WHERE a > 2000000; +--sorted_result +SELECT * FROM t WHERE a > 2000000; +explain format='brief' SELECT * FROM t WHERE a <= 2000000; +--sorted_result +SELECT * FROM t WHERE a <= 2000000; +explain format='brief' SELECT * FROM t WHERE a >= 2000000; +--sorted_result +SELECT * FROM t WHERE a >= 2000000; +explain format='brief' SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a >= 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a < 2000002; +explain format='brief' SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +--sorted_result +SELECT * FROM t WHERE a > 2000000 AND a <= 2000002; +explain format='brief' SELECT * FROM t WHERE a < 2999997; +--sorted_result +SELECT * FROM t WHERE a < 2999997; +explain format='brief' SELECT * FROM t WHERE a > 2999997; +--sorted_result +SELECT * FROM t WHERE a > 2999997; +explain format='brief' SELECT * FROM t WHERE a <= 2999997; +--sorted_result +SELECT * FROM t WHERE a <= 2999997; +explain format='brief' SELECT * FROM t WHERE a >= 2999997; +--sorted_result +SELECT * FROM t WHERE a >= 2999997; +explain format='brief' SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a >= 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a < 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999997 AND a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a < 2999998; +--sorted_result +SELECT * FROM t WHERE a < 2999998; +explain format='brief' SELECT * FROM t WHERE a > 2999998; +--sorted_result +SELECT * FROM t WHERE a > 2999998; +explain format='brief' SELECT * FROM t WHERE a <= 2999998; +--sorted_result +SELECT * FROM t WHERE a <= 2999998; +explain format='brief' SELECT * FROM t WHERE a >= 2999998; +--sorted_result +SELECT * FROM t WHERE a >= 2999998; +explain format='brief' SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 2999998 AND a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a > 2999998 AND a <= 3000000; +explain 
format='brief' SELECT * FROM t WHERE a < 2999999; +--sorted_result +SELECT * FROM t WHERE a < 2999999; +explain format='brief' SELECT * FROM t WHERE a > 2999999; +--sorted_result +SELECT * FROM t WHERE a > 2999999; +explain format='brief' SELECT * FROM t WHERE a <= 2999999; +--sorted_result +SELECT * FROM t WHERE a <= 2999999; +explain format='brief' SELECT * FROM t WHERE a >= 2999999; +--sorted_result +SELECT * FROM t WHERE a >= 2999999; +explain format='brief' SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a >= 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a < 3000001; +explain format='brief' SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +--sorted_result +SELECT * FROM t WHERE a > 2999999 AND a <= 3000001; +explain format='brief' SELECT * FROM t WHERE a < 3000000; +--sorted_result +SELECT * FROM t WHERE a < 3000000; +explain format='brief' SELECT * FROM t WHERE a > 3000000; +--sorted_result +SELECT * FROM t WHERE a > 3000000; +explain format='brief' SELECT * FROM t WHERE a <= 3000000; +--sorted_result +SELECT * FROM t WHERE a <= 3000000; +explain format='brief' SELECT * FROM t WHERE a >= 3000000; +--sorted_result +SELECT * FROM t WHERE a >= 3000000; +explain format='brief' SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a >= 3000000 AND a <= 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a < 3000002; +explain format='brief' SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +--sorted_result +SELECT * FROM t WHERE a > 3000000 AND a <= 3000002; +set @@tidb_partition_prune_mode = default; + +# TestRangePartitionBoundariesLtS +set @@tidb_partition_prune_mode = 'dynamic'; +drop table if exists t; +CREATE TABLE t +(a INT, b varchar(255)) +PARTITION BY RANGE (a) ( + PARTITION p0 VALUES LESS THAN (1), + PARTITION p1 VALUES LESS THAN (2), + PARTITION p2 VALUES LESS THAN (3), + PARTITION p3 VALUES LESS THAN (4), + PARTITION p4 VALUES LESS THAN (5), + PARTITION p5 VALUES LESS THAN (6), + PARTITION p6 VALUES LESS THAN (7)); +INSERT INTO t VALUES (0, '0 Filler...'); +INSERT INTO t VALUES (1, '1 Filler...'); +INSERT INTO t VALUES (2, '2 Filler...'); +INSERT INTO t VALUES (3, '3 Filler...'); +INSERT INTO t VALUES (4, '4 Filler...'); +INSERT INTO t VALUES (5, '5 Filler...'); +INSERT INTO t VALUES (6, '6 Filler...'); +ANALYZE TABLE t; +explain format='brief' SELECT * FROM t WHERE a < -1; +--sorted_result +SELECT * FROM t WHERE a < -1; +explain format='brief' SELECT * FROM t WHERE a > -1; +--sorted_result +SELECT * FROM t WHERE a > -1; +explain format='brief' SELECT * FROM t WHERE a <= -1; +--sorted_result +SELECT * FROM t WHERE a <= -1; +explain format='brief' SELECT * FROM t WHERE a >= -1; +--sorted_result +SELECT * FROM t WHERE a >= -1; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > -1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > -1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < -1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < -1; +explain format='brief' SELECT * FROM t WHERE 
NOT (a < 2 OR a > -1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > -1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < -1); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= -1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= -1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < -1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < -1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= -1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < -1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > -1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > -1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= -1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= -1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > -1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= -1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= -1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= -1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= -1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= -1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= -1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= -1); +explain format='brief' SELECT * FROM t WHERE a < 0; +--sorted_result +SELECT * FROM t WHERE a < 0; +explain format='brief' SELECT * FROM t WHERE a > 0; +--sorted_result +SELECT * FROM t WHERE a > 0; +explain format='brief' SELECT * FROM t WHERE a <= 0; +--sorted_result +SELECT * FROM t WHERE a <= 0; +explain format='brief' SELECT * FROM t WHERE a >= 0; +--sorted_result +SELECT * FROM t WHERE a >= 0; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 0; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 0; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 0; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 0; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 0); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 0); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 0; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 0; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 0; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 0; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 0); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 0); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 0; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 0; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 0; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 0; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 0); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR 
a > 0); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 0); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 0; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 0; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 0; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 0; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 0); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 0); +explain format='brief' SELECT * FROM t WHERE a < 1; +--sorted_result +SELECT * FROM t WHERE a < 1; +explain format='brief' SELECT * FROM t WHERE a > 1; +--sorted_result +SELECT * FROM t WHERE a > 1; +explain format='brief' SELECT * FROM t WHERE a <= 1; +--sorted_result +SELECT * FROM t WHERE a <= 1; +explain format='brief' SELECT * FROM t WHERE a >= 1; +--sorted_result +SELECT * FROM t WHERE a >= 1; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 1); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 1; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 1; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 1; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 1; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 1); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 1); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 1; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 1; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 1; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 1; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 1); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 1); +explain format='brief' SELECT * FROM t WHERE a < 2; +--sorted_result +SELECT * FROM t WHERE a < 2; +explain format='brief' SELECT * FROM t WHERE a > 2; +--sorted_result +SELECT * FROM t WHERE a > 2; +explain format='brief' SELECT * FROM t WHERE a <= 2; +--sorted_result +SELECT * FROM t WHERE a <= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2; +--sorted_result +SELECT * FROM t WHERE a >= 2; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a 
> 2; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 2; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 2; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 2; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 2); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 2); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 2; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 2; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 2; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 2); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 2); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 2; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 2; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 2; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 2; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 2); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 2); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 2; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 2; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 2; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 2; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 2); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 2); +explain format='brief' SELECT * FROM t WHERE a < 3; +--sorted_result +SELECT * FROM t WHERE a < 3; +explain format='brief' SELECT * FROM t WHERE a > 3; +--sorted_result +SELECT * FROM t WHERE a > 3; +explain format='brief' SELECT * FROM t WHERE a <= 3; +--sorted_result +SELECT * FROM t WHERE a <= 3; +explain format='brief' SELECT * FROM t WHERE a >= 3; +--sorted_result +SELECT * FROM t WHERE a >= 3; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 3; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 3; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 3; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 3; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 3); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 3); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 3; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 3; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 3; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 3; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 3); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 3); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 3; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 3; +explain format='brief' SELECT * FROM t WHERE a 
> 2 AND a <= 3; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 3; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 3); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 3); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 3; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 3; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 3; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 3; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 3); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 3); +explain format='brief' SELECT * FROM t WHERE a < 4; +--sorted_result +SELECT * FROM t WHERE a < 4; +explain format='brief' SELECT * FROM t WHERE a > 4; +--sorted_result +SELECT * FROM t WHERE a > 4; +explain format='brief' SELECT * FROM t WHERE a <= 4; +--sorted_result +SELECT * FROM t WHERE a <= 4; +explain format='brief' SELECT * FROM t WHERE a >= 4; +--sorted_result +SELECT * FROM t WHERE a >= 4; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 4; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 4; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 4; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 4; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 4); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 4); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 4; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 4; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 4; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 4; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 4); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 4); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 4; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 4; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 4; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 4; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 4); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 4); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 4; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 4; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 4; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 4; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 4); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 4); +explain format='brief' SELECT * FROM t WHERE a < 5; +--sorted_result +SELECT * FROM t WHERE a < 5; +explain format='brief' SELECT * FROM t WHERE a > 5; +--sorted_result +SELECT * FROM t WHERE a > 5; +explain format='brief' SELECT * FROM t WHERE a <= 5; 
+--sorted_result +SELECT * FROM t WHERE a <= 5; +explain format='brief' SELECT * FROM t WHERE a >= 5; +--sorted_result +SELECT * FROM t WHERE a >= 5; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 5; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 5; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 5; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 5; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 5); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 5); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 5; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 5; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 5; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 5; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 5); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 5); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 5; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 5; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 5; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 5; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 5); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 5); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 5; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 5; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 5; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 5; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 5); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 5); +explain format='brief' SELECT * FROM t WHERE a < 6; +--sorted_result +SELECT * FROM t WHERE a < 6; +explain format='brief' SELECT * FROM t WHERE a > 6; +--sorted_result +SELECT * FROM t WHERE a > 6; +explain format='brief' SELECT * FROM t WHERE a <= 6; +--sorted_result +SELECT * FROM t WHERE a <= 6; +explain format='brief' SELECT * FROM t WHERE a >= 6; +--sorted_result +SELECT * FROM t WHERE a >= 6; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 6; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 6; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 6; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 6; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 6); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 6); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 6; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 6; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 6; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 6; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 6); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 6); +--sorted_result +SELECT 
* FROM t WHERE NOT (a >= 2 AND a < 6); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 6; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 6; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 6; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 6; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 6); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 6); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 6; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 6; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 6; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 6; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 6); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 6); +explain format='brief' SELECT * FROM t WHERE a < 7; +--sorted_result +SELECT * FROM t WHERE a < 7; +explain format='brief' SELECT * FROM t WHERE a > 7; +--sorted_result +SELECT * FROM t WHERE a > 7; +explain format='brief' SELECT * FROM t WHERE a <= 7; +--sorted_result +SELECT * FROM t WHERE a <= 7; +explain format='brief' SELECT * FROM t WHERE a >= 7; +--sorted_result +SELECT * FROM t WHERE a >= 7; +explain format='brief' SELECT * FROM t WHERE a < 2 OR a > 7; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a > 7; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a < 7; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a < 7; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a > 7); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a < 7); +explain format='brief' SELECT * FROM t WHERE a < 2 OR a >= 7; +--sorted_result +SELECT * FROM t WHERE a < 2 OR a >= 7; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a < 7; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a < 7; +explain format='brief' SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a < 2 OR a >= 7); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a < 7); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a > 7; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a > 7; +explain format='brief' SELECT * FROM t WHERE a > 2 AND a <= 7; +--sorted_result +SELECT * FROM t WHERE a > 2 AND a <= 7; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a > 7); +explain format='brief' SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a > 2 AND a <= 7); +explain format='brief' SELECT * FROM t WHERE a <= 2 OR a >= 7; +--sorted_result +SELECT * FROM t WHERE a <= 2 OR a >= 7; +explain format='brief' SELECT * FROM t WHERE a >= 2 AND a <= 7; +--sorted_result +SELECT * FROM t WHERE a >= 2 AND a <= 7; +explain format='brief' SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a <= 2 OR a >= 7); +explain format='brief' SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +--sorted_result +SELECT * FROM t WHERE NOT (a >= 2 AND a <= 7); +set @@tidb_partition_prune_mode = default; + diff 
--git a/tests/integrationtest/t/executor/partition/partition_with_expression.test b/tests/integrationtest/t/executor/partition/partition_with_expression.test
new file mode 100644
index 0000000000000..dc8efe0b37369
--- /dev/null
+++ b/tests/integrationtest/t/executor/partition/partition_with_expression.test
@@ -0,0 +1,454 @@
+# TestDateColWithUnequalExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"));
+create table t(a datetime, b int) partition by range columns (a) (partition p0 values less than("2012-12-10 00:00:00"), partition p1 values less than("2022-12-30 00:00:00"), partition p2 values less than("2025-12-12 00:00:00"));
+insert into tp values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3);
+insert into t values("2015-09-09 00:00:00", 1), ("2020-08-08 19:00:01", 2), ("2024-01-01 01:01:01", 3);
+analyze table tp;
+analyze table t;
+explain format='brief' select * from tp where a != '2024-01-01 01:01:01';
+--sorted_result
+select * from tp where a != '2024-01-01 01:01:01';
+--sorted_result
+select * from t where a != '2024-01-01 01:01:01';
+explain format='brief' select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00';
+--sorted_result
+select * from tp where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00';
+--sorted_result
+select * from t where a != '2024-01-01 01:01:01' and a > '2015-09-09 00:00:00';
+set tidb_partition_prune_mode=default;
+
+# TestWeekdayWithExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a datetime, b int) partition by range(weekday(a)) (partition p0 values less than(3), partition p1 values less than(5), partition p2 values less than(8));
+create table t(a datetime, b int);
+insert into tp values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0);
+insert into t values("2020-08-17 00:00:00", 1), ("2020-08-18 00:00:00", 2), ("2020-08-19 00:00:00", 4), ("2020-08-20 00:00:00", 5), ("2020-08-21 00:00:00", 6), ("2020-08-22 00:00:00", 0);
+analyze table tp;
+analyze table t;
+explain format='brief' select * from tp where a = '2020-08-17 00:00:00';
+--sorted_result
+select * from tp where a = '2020-08-17 00:00:00';
+--sorted_result
+select * from t where a = '2020-08-17 00:00:00';
+explain format='brief' select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+--sorted_result
+select * from tp where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+--sorted_result
+select * from t where a= '2020-08-20 00:00:00' and a < '2020-08-22 00:00:00';
+explain format='brief' select * from tp where a < '2020-08-19 00:00:00';
+--sorted_result
+select * from tp where a < '2020-08-19 00:00:00';
+--sorted_result
+select * from t where a < '2020-08-19 00:00:00';
+set tidb_partition_prune_mode=default;
+
+# TestFloorUnixTimestampAndIntColWithExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a timestamp, b int) partition by range(floor(unix_timestamp(a))) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400));
+create table t(a timestamp, b int);
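+## floor(unix_timestamp(a)) is one of the expressions TiDB's range-partition
+## pruner has special support for; the unpartitioned table t serves as the
+## baseline that the partitioned results below are compared against.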
+insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+analyze table tp;
+analyze table t;
+explain select * from tp where a > '2020-09-11 00:00:00';
+--sorted_result
+select * from tp where a > '2020-09-11 00:00:00';
+--sorted_result
+select * from t where a > '2020-09-11 00:00:00';
+explain select * from tp where a < '2020-07-07 01:00:00';
+--sorted_result
+select * from tp where a < '2020-07-07 01:00:00';
+--sorted_result
+select * from t where a < '2020-07-07 01:00:00';
+set tidb_partition_prune_mode=default;
+
+# TestUnixTimestampAndIntColWithExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a timestamp, b int) partition by range(unix_timestamp(a)) (partition p0 values less than(1580670000), partition p1 values less than(1597622400), partition p2 values less than(1629158400));
+create table t(a timestamp, b int);
+insert into tp values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+insert into t values('2020-01-01 19:00:00', 1),('2020-08-15 00:00:00', -1), ('2020-08-18 05:00:01', 2), ('2020-10-01 14:13:15', 3);
+analyze table tp;
+analyze table t;
+explain select * from tp where a > '2020-09-11 00:00:00';
+--sorted_result
+select * from tp where a > '2020-09-11 00:00:00';
+--sorted_result
+select * from t where a > '2020-09-11 00:00:00';
+explain select * from tp where a < '2020-07-07 01:00:00';
+--sorted_result
+select * from tp where a < '2020-07-07 01:00:00';
+--sorted_result
+select * from t where a < '2020-07-07 01:00:00';
+set tidb_partition_prune_mode=default;
+
+# TestDatetimeColAndIntColWithExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a datetime, b int) partition by range columns(a) (partition p0 values less than('2020-02-02 00:00:00'), partition p1 values less than('2020-09-01 00:00:00'), partition p2 values less than('2020-12-20 00:00:00'));
+create table t(a datetime, b int);
+insert into tp values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4);
+insert into t values('2020-01-01 12:00:00', 1), ('2020-08-22 10:00:00', 2), ('2020-09-09 11:00:00', 3), ('2020-10-01 00:00:00', 4);
+analyze table tp;
+analyze table t;
+explain select * from tp where a < '2020-09-01 00:00:00';
+--sorted_result
+select * from tp where a < '2020-09-01 00:00:00';
+--sorted_result
+select * from t where a < '2020-09-01 00:00:00';
+explain select * from tp where a > '2020-07-07 01:00:00';
+--sorted_result
+select * from tp where a > '2020-07-07 01:00:00';
+--sorted_result
+select * from t where a > '2020-07-07 01:00:00';
+set tidb_partition_prune_mode=default;
+
+# TestVarcharColAndIntColWithExpression
+drop table if exists tp, t;
+set tidb_partition_prune_mode='dynamic';
+create table tp(a varchar(255), b int) partition by range columns(a) (partition p0 values less than('ddd'), partition p1 values less than('ggggg'), partition p2 values less than('mmmmmm'));
+create table t(a varchar(255), b int);
+insert into tp values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6);
+insert into t values('aaa', 1), ('bbbb', 2), ('ccc', 3), ('dfg', 4), ('kkkk', 5), ('10', 6);
+analyze table tp;
+analyze table t;
+explain select * from tp where a < '10';
+--sorted_result +select * from tp where a < '10'; +--sorted_result +select * from t where a < '10'; +explain select * from tp where a > 0; +--sorted_result +select * from tp where a > 0; +--sorted_result +select * from t where a > 0; +explain select * from tp where a < 0; +--sorted_result +select * from tp where a < 0; +--sorted_result +select * from t where a < 0; +set tidb_partition_prune_mode=default; + +# TestDynamicPruneModeWithExpression +drop table if exists trange, thash, t; +create table trange(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11)); +create table thash(a int, b int) partition by hash(a) partitions 4; +create table t(a int, b int); +insert into trange values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into thash values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +insert into t values(1, NULL), (1, NULL), (1, 1), (2, 1), (3, 2), (4, 3), (5, 5), (6, 7), (7, 7), (7, 7), (10, NULL), (NULL, NULL), (NULL, 1); +set session tidb_partition_prune_mode='dynamic'; +analyze table trange; +analyze table thash; +analyze table t; +--sorted_result +SELECT * from t where a = 2; +explain format='brief' select * from trange where a = 2; +--sorted_result +SELECT * from trange where a = 2; +explain format='brief' select * from thash where a = 2; +--sorted_result +SELECT * from thash where a = 2; +--sorted_result +SELECT * from t where a = 4 or a = 1; +explain format='brief' select * from trange where a = 4 or a = 1; +--sorted_result +SELECT * from trange where a = 4 or a = 1; +explain format='brief' select * from thash where a = 4 or a = 1; +--sorted_result +SELECT * from thash where a = 4 or a = 1; +--sorted_result +SELECT * from t where a = -1; +explain format='brief' select * from trange where a = -1; +--sorted_result +SELECT * from trange where a = -1; +explain format='brief' select * from thash where a = -1; +--sorted_result +SELECT * from thash where a = -1; +--sorted_result +SELECT * from t where a is NULL; +explain format='brief' select * from trange where a is NULL; +--sorted_result +SELECT * from trange where a is NULL; +explain format='brief' select * from thash where a is NULL; +--sorted_result +SELECT * from thash where a is NULL; +--sorted_result +SELECT * from t where b is NULL; +explain format='brief' select * from trange where b is NULL; +--sorted_result +SELECT * from trange where b is NULL; +explain format='brief' select * from thash where b is NULL; +--sorted_result +SELECT * from thash where b is NULL; +--sorted_result +SELECT * from t where a > -1; +explain format='brief' select * from trange where a > -1; +--sorted_result +SELECT * from trange where a > -1; +explain format='brief' select * from thash where a > -1; +--sorted_result +SELECT * from thash where a > -1; +--sorted_result +SELECT * from t where a >= 4 and a <= 5; +explain format='brief' select * from trange where a >= 4 and a <= 5; +--sorted_result +SELECT * from trange where a >= 4 and a <= 5; +explain format='brief' select * from thash where a >= 4 and a <= 5; +--sorted_result +SELECT * from thash where a >= 4 and a <= 5; +--sorted_result +SELECT * from t where a > 10; +explain format='brief' select * from trange where a > 10; +--sorted_result +SELECT * from trange where a > 10; +explain format='brief' select * from thash where a > 10; +--sorted_result 
+--sorted_result
+SELECT * from t where a > 10;
+explain format='brief' select * from trange where a > 10;
+--sorted_result
+SELECT * from trange where a > 10;
+explain format='brief' select * from thash where a > 10;
+--sorted_result
+SELECT * from thash where a > 10;
+--sorted_result
+SELECT * from t where a >=2 and a <= 3;
+explain format='brief' select * from trange where a >=2 and a <= 3;
+--sorted_result
+SELECT * from trange where a >=2 and a <= 3;
+explain format='brief' select * from thash where a >=2 and a <= 3;
+--sorted_result
+SELECT * from thash where a >=2 and a <= 3;
+--sorted_result
+SELECT * from t where a between 2 and 3;
+explain format='brief' select * from trange where a between 2 and 3;
+--sorted_result
+SELECT * from trange where a between 2 and 3;
+explain format='brief' select * from thash where a between 2 and 3;
+--sorted_result
+SELECT * from thash where a between 2 and 3;
+--sorted_result
+SELECT * from t where a < 2;
+explain format='brief' select * from trange where a < 2;
+--sorted_result
+SELECT * from trange where a < 2;
+explain format='brief' select * from thash where a < 2;
+--sorted_result
+SELECT * from thash where a < 2;
+--sorted_result
+SELECT * from t where a <= 3;
+explain format='brief' select * from trange where a <= 3;
+--sorted_result
+SELECT * from trange where a <= 3;
+explain format='brief' select * from thash where a <= 3;
+--sorted_result
+SELECT * from thash where a <= 3;
+--sorted_result
+SELECT * from t where a in (2, 3);
+explain format='brief' select * from trange where a in (2, 3);
+--sorted_result
+SELECT * from trange where a in (2, 3);
+explain format='brief' select * from thash where a in (2, 3);
+--sorted_result
+SELECT * from thash where a in (2, 3);
+--sorted_result
+SELECT * from t where a in (1, 5);
+explain format='brief' select * from trange where a in (1, 5);
+--sorted_result
+SELECT * from trange where a in (1, 5);
+explain format='brief' select * from thash where a in (1, 5);
+--sorted_result
+SELECT * from thash where a in (1, 5);
+--sorted_result
+SELECT * from t where a not in (1, 5);
+explain format='brief' select * from trange where a not in (1, 5);
+--sorted_result
+SELECT * from trange where a not in (1, 5);
+explain format='brief' select * from thash where a not in (1, 5);
+--sorted_result
+SELECT * from thash where a not in (1, 5);
+--sorted_result
+SELECT * from t where a = 2 and a = 2;
+explain format='brief' select * from trange where a = 2 and a = 2;
+--sorted_result
+SELECT * from trange where a = 2 and a = 2;
+explain format='brief' select * from thash where a = 2 and a = 2;
+--sorted_result
+SELECT * from thash where a = 2 and a = 2;
+--sorted_result
+SELECT * from t where a = 2 and a = 3;
+explain format='brief' select * from trange where a = 2 and a = 3;
+--sorted_result
+SELECT * from trange where a = 2 and a = 3;
+explain format='brief' select * from thash where a = 2 and a = 3;
+--sorted_result
+SELECT * from thash where a = 2 and a = 3;
+--sorted_result
+SELECT * from t where a < 2 and a > 0;
+explain format='brief' select * from trange where a < 2 and a > 0;
+--sorted_result
+SELECT * from trange where a < 2 and a > 0;
+explain format='brief' select * from thash where a < 2 and a > 0;
+--sorted_result
+SELECT * from thash where a < 2 and a > 0;
+--sorted_result
+SELECT * from t where a < 2 and a < 3;
+explain format='brief' select * from trange where a < 2 and a < 3;
+--sorted_result
+SELECT * from trange where a < 2 and a < 3;
+explain format='brief' select * from thash where a < 2 and a < 3;
+--sorted_result
+SELECT * from thash where a < 2 and a < 3;
+--sorted_result
+SELECT * from t where a > 1 and a > 2;
+explain format='brief' select * from trange where a > 1 and a > 2;
+--sorted_result
+SELECT * from trange where a > 1 and a > 2;
+explain format='brief' select * from thash where a > 1 and a > 2;
+--sorted_result
+SELECT * from thash where a > 1 and a > 2;
+--sorted_result
+SELECT * from t where a = 2 or a = 3;
+explain format='brief' select * from trange where a = 2 or a = 3;
+--sorted_result
+SELECT * from trange where a = 2 or a = 3;
+explain format='brief' select * from thash where a = 2 or a = 3;
+--sorted_result
+SELECT * from thash where a = 2 or a = 3;
+--sorted_result
+SELECT * from t where a = 2 or a in (3);
+explain format='brief' select * from trange where a = 2 or a in (3);
+--sorted_result
+SELECT * from trange where a = 2 or a in (3);
+explain format='brief' select * from thash where a = 2 or a in (3);
+--sorted_result
+SELECT * from thash where a = 2 or a in (3);
+--sorted_result
+SELECT * from t where a = 2 or a > 3;
+explain format='brief' select * from trange where a = 2 or a > 3;
+--sorted_result
+SELECT * from trange where a = 2 or a > 3;
+explain format='brief' select * from thash where a = 2 or a > 3;
+--sorted_result
+SELECT * from thash where a = 2 or a > 3;
+--sorted_result
+SELECT * from t where a = 2 or a <= 1;
+explain format='brief' select * from trange where a = 2 or a <= 1;
+--sorted_result
+SELECT * from trange where a = 2 or a <= 1;
+explain format='brief' select * from thash where a = 2 or a <= 1;
+--sorted_result
+SELECT * from thash where a = 2 or a <= 1;
+--sorted_result
+SELECT * from t where a = 2 or a between 2 and 2;
+explain format='brief' select * from trange where a = 2 or a between 2 and 2;
+--sorted_result
+SELECT * from trange where a = 2 or a between 2 and 2;
+explain format='brief' select * from thash where a = 2 or a between 2 and 2;
+--sorted_result
+SELECT * from thash where a = 2 or a between 2 and 2;
+--sorted_result
+SELECT * from t where a != 2;
+explain format='brief' select * from trange where a != 2;
+--sorted_result
+SELECT * from trange where a != 2;
+explain format='brief' select * from thash where a != 2;
+--sorted_result
+SELECT * from thash where a != 2;
+--sorted_result
+SELECT * from t where a != 2 and a > 4;
+explain format='brief' select * from trange where a != 2 and a > 4;
+--sorted_result
+SELECT * from trange where a != 2 and a > 4;
+explain format='brief' select * from thash where a != 2 and a > 4;
+--sorted_result
+SELECT * from thash where a != 2 and a > 4;
+--sorted_result
+SELECT * from t where a != 2 and a != 3;
+explain format='brief' select * from trange where a != 2 and a != 3;
+--sorted_result
+SELECT * from trange where a != 2 and a != 3;
+explain format='brief' select * from thash where a != 2 and a != 3;
+--sorted_result
+SELECT * from thash where a != 2 and a != 3;
+--sorted_result
+SELECT * from t where a != 2 and a = 3;
+explain format='brief' select * from trange where a != 2 and a = 3;
+--sorted_result
+SELECT * from trange where a != 2 and a = 3;
+explain format='brief' select * from thash where a != 2 and a = 3;
+--sorted_result
+SELECT * from thash where a != 2 and a = 3;
+--sorted_result
+SELECT * from t where not (a = 2);
+explain format='brief' select * from trange where not (a = 2);
+--sorted_result
+SELECT * from trange where not (a = 2);
+explain format='brief' select * from thash where not (a = 2);
+--sorted_result
+SELECT * from thash where not (a = 2);
+--sorted_result
+SELECT * from t where not (a > 2);
+explain format='brief' select * from trange where not (a > 2);
+--sorted_result
+SELECT * from trange where not (a > 2);
+explain format='brief' select * from thash where not (a > 2);
+--sorted_result
+SELECT * from thash where not (a > 2);
+--sorted_result
+SELECT * from t where not (a < 2);
+explain format='brief' select * from trange where not (a < 2);
+--sorted_result
+SELECT * from trange where not (a < 2);
+explain format='brief' select * from thash where not (a < 2);
+--sorted_result
+SELECT * from thash where not (a < 2);
+--sorted_result
+SELECT * from t where a + 1 > 4;
+explain format='brief' select * from trange where a + 1 > 4;
+--sorted_result
+SELECT * from trange where a + 1 > 4;
+explain format='brief' select * from thash where a + 1 > 4;
+--sorted_result
+SELECT * from thash where a + 1 > 4;
+--sorted_result
+SELECT * from t where a - 1 > 0;
+explain format='brief' select * from trange where a - 1 > 0;
+--sorted_result
+SELECT * from trange where a - 1 > 0;
+explain format='brief' select * from thash where a - 1 > 0;
+--sorted_result
+SELECT * from thash where a - 1 > 0;
+--sorted_result
+SELECT * from t where a * 2 < 0;
+explain format='brief' select * from trange where a * 2 < 0;
+--sorted_result
+SELECT * from trange where a * 2 < 0;
+explain format='brief' select * from thash where a * 2 < 0;
+--sorted_result
+SELECT * from thash where a * 2 < 0;
+--sorted_result
+SELECT * from t where a << 1 < 0;
+explain format='brief' select * from trange where a << 1 < 0;
+--sorted_result
+SELECT * from trange where a << 1 < 0;
+explain format='brief' select * from thash where a << 1 < 0;
+--sorted_result
+SELECT * from thash where a << 1 < 0;
+--sorted_result
+SELECT * from t where a > '10';
+explain format='brief' select * from trange where a > '10';
+--sorted_result
+SELECT * from trange where a > '10';
+explain format='brief' select * from thash where a > '10';
+--sorted_result
+SELECT * from thash where a > '10';
+--sorted_result
+SELECT * from t where a > '10ab';
+explain format='brief' select * from trange where a > '10ab';
+--sorted_result
+SELECT * from trange where a > '10ab';
+explain format='brief' select * from thash where a > '10ab';
+--sorted_result
+SELECT * from thash where a > '10ab';
+set tidb_partition_prune_mode=default;
+
diff --git a/tests/integrationtest/t/executor/partition/table.test b/tests/integrationtest/t/executor/partition/table.test
new file mode 100644
index 0000000000000..d3b8b7645cfbd
--- /dev/null
+++ b/tests/integrationtest/t/executor/partition/table.test
@@ -0,0 +1,358 @@
+# TestSetPartitionPruneMode
+set @@session.tidb_partition_prune_mode = DEFAULT;
+show warnings;
+set @@global.tidb_partition_prune_mode = DEFAULT;
+show warnings;
+
+connect (conn1, localhost, root,,);
+select @@global.tidb_partition_prune_mode;
+select @@session.tidb_partition_prune_mode;
+set @@session.tidb_partition_prune_mode = "static";
+show warnings;
+set @@global.tidb_partition_prune_mode = "static";
+show warnings;
+connection default;
+disconnect conn1;
+
+connect (conn1, localhost, root,,);
+select @@session.tidb_partition_prune_mode;
+show warnings;
+select @@global.tidb_partition_prune_mode;
+set @@session.tidb_partition_prune_mode = "dynamic";
+show warnings;
+set @@global.tidb_partition_prune_mode = "dynamic";
+show warnings;
+connection default;
+disconnect conn1;
+
+connect (conn1, localhost, root,,);
+select @@global.tidb_partition_prune_mode;
+select @@session.tidb_partition_prune_mode;
+connection default;
+disconnect conn1;
+
+set @@session.tidb_partition_prune_mode = DEFAULT;
+set @@global.tidb_partition_prune_mode = DEFAULT;
+
+# TestFourReader
+drop table if exists pt;
+create table pt (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
+partition p0 values less than (4),
+partition p1 values less than (7),
+partition p2 values less than (10));
+analyze table pt;
+insert into pt values (0, 0), (2, 2), (4, 4), (6, 6), (7, 7), (9, 9), (null, null);
+--sorted_result
+select * from pt;
+select * from pt where c > 10;
+select * from pt where c > 8;
+--sorted_result
+select * from pt where c < 2 or c >= 9;
+--sorted_result
+select c from pt;
+select c from pt where c > 10;
+select c from pt where c > 8;
+--sorted_result
+select c from pt where c < 2 or c >= 9;
+--sorted_result
+select /*+ use_index(pt, i_id) */ * from pt;
+select /*+ use_index(pt, i_id) */ * from pt where id < 4 and c > 10;
+select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c > 8;
+--sorted_result
+select /*+ use_index(pt, i_id) */ * from pt where id < 10 and c < 2 or c >= 9;
+set @@tidb_enable_index_merge = 1;
+--sorted_result
+select /*+ use_index(i_c, i_id) */ * from pt where id = 4 or c < 7;
+set @@tidb_enable_index_merge = DEFAULT;
+
+# TestPartitionIndexJoin
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by range (c) (
+ partition p0 values less than (4),
+ partition p1 values less than (7),
+ partition p2 values less than (10));
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+--sorted_result
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+--sorted_result
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by list (c) (
+ partition p0 values in (1,2,3,4),
+ partition p1 values in (5,6,7),
+ partition p2 values in (8, 9,10));
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+--sorted_result
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+--sorted_result
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+drop table if exists p, t;
+create table p (id int, c int, key i_id(id), key i_c(c)) partition by hash(c) partitions 5;
+create table t (id int);
+insert into p values (3,3), (4,4), (6,6), (9,9);
+insert into t values (4), (9);
+--sorted_result
+select /*+ INL_JOIN(p) */ * from p, t where p.id = t.id;
+--sorted_result
+select /*+ INL_JOIN(p) */ p.id from p, t where p.id = t.id;
+
+# TestPartitionUnionScanIndexJoin
+# For issue https://github.com/pingcap/tidb/issues/19152
+drop table if exists t1, t2;
+create table t1 (c_int int, c_str varchar(40), primary key (c_int)) partition by range (c_int) ( partition p0 values less than (10), partition p1 values less than maxvalue);
+create table t2 (c_int int, c_str varchar(40), primary key (c_int, c_str)) partition by hash (c_int) partitions 4;
+insert into t1 values (10, 'interesting neumann');
+insert into t2 select * from t1;
+begin;
+insert into t2 values (11, 'hopeful hoover');
+select /*+ INL_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11);
+select /*+ INL_HASH_JOIN(t1,t2) */ * from t1 join t2 on t1.c_int = t2.c_int and t1.c_str = t2.c_str where t1.c_int in (10, 11);
+commit;
+
+# TestPartitionReaderUnderApply
+## For issue 19458.
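+## The correlated scalar subqueries below put a partition-table reader on the
+## inner side of an Apply operator, which is the code path that issue exercised.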
+drop table if exists t;
+create table t(c_int int);
+insert into t values(1), (2), (3), (4), (5), (6), (7), (8), (9);
+DROP TABLE IF EXISTS `t1`;
+CREATE TABLE t1 (
+ c_int int NOT NULL,
+ c_str varchar(40) NOT NULL,
+ c_datetime datetime NOT NULL,
+ c_timestamp timestamp NULL DEFAULT NULL,
+ c_double double DEFAULT NULL,
+ c_decimal decimal(12,6) DEFAULT NULL,
+ PRIMARY KEY (c_int,c_str,c_datetime)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci
+ PARTITION BY RANGE (c_int)
+(PARTITION p0 VALUES LESS THAN (2) ENGINE = InnoDB,
+ PARTITION p1 VALUES LESS THAN (4) ENGINE = InnoDB,
+ PARTITION p2 VALUES LESS THAN (6) ENGINE = InnoDB,
+ PARTITION p3 VALUES LESS THAN (8) ENGINE = InnoDB,
+ PARTITION p4 VALUES LESS THAN (10) ENGINE = InnoDB,
+ PARTITION p5 VALUES LESS THAN (20) ENGINE = InnoDB,
+ PARTITION p6 VALUES LESS THAN (50) ENGINE = InnoDB,
+ PARTITION p7 VALUES LESS THAN (1000000000) ENGINE = InnoDB);
+INSERT INTO `t1` VALUES (19,'nifty feistel','2020-02-28 04:01:28','2020-02-04 06:11:57',32.430079,1.284000),(20,'objective snyder','2020-04-15 17:55:04','2020-05-30 22:04:13',37.690874,9.372000);
+begin;
+insert into t1 values (22, 'wizardly saha', '2020-05-03 16:35:22', '2020-05-03 02:18:42', 96.534810, 0.088);
+select c_int from t where (select min(t1.c_int) from t1 where t1.c_int > t.c_int) > (select count(*) from t1 where t1.c_int > t.c_int) order by c_int;
+rollback;
+## For issue 19450.
+drop table if exists t1, t2;
+create table t1 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int));
+create table t2 (c_int int, c_str varchar(40), c_decimal decimal(12, 6), primary key (c_int)) partition by hash (c_int) partitions 4;
+insert into t1 values (1, 'romantic robinson', 4.436), (2, 'stoic chaplygin', 9.826), (3, 'vibrant shamir', 6.300), (4, 'hungry wilson', 4.900), (5, 'naughty swartz', 9.524);
+insert into t2 select * from t1;
+--sorted_result
+select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str);
+# For issue 19450 release-4.0
+set @@tidb_partition_prune_mode='static';
+--sorted_result
+select * from t1 where c_decimal in (select c_decimal from t2 where t1.c_int = t2.c_int or t1.c_int = t2.c_int and t1.c_str > t2.c_str);
+set @@tidb_partition_prune_mode=default;
+
+# TestImproveCoverage
+drop table if exists coverage_rr, coverage_dt;
+create table coverage_rr (
+pk1 varchar(35) NOT NULL,
+pk2 int NOT NULL,
+c int,
+PRIMARY KEY (pk1,pk2)) partition by hash(pk2) partitions 4;
+create table coverage_dt (pk1 varchar(35), pk2 int);
+insert into coverage_rr values ('ios', 3, 2),('android', 4, 7),('linux',5,1);
+insert into coverage_dt values ('apple',3),('ios',3),('linux',5);
+set @@tidb_partition_prune_mode = 'dynamic';
+--sorted_result
+select /*+ INL_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);
+--sorted_result
+select /*+ INL_MERGE_JOIN(dt, rr) */ * from coverage_dt dt join coverage_rr rr on (dt.pk1 = rr.pk1 and dt.pk2 = rr.pk2);
+set @@tidb_partition_prune_mode = default;
+
+# TestOrderByOnUnsignedPk
+drop table if exists tunsigned_hash;
+create table tunsigned_hash(a bigint unsigned primary key) partition by hash(a) partitions 6;
+insert into tunsigned_hash values(25), (9279808998424041135);
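+## 9279808998424041135 exceeds the signed BIGINT maximum (9223372036854775807),
+## so min()/max() across partitions must compare the handle as unsigned.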
+select min(a) from tunsigned_hash;
+select max(a) from tunsigned_hash;
+
+# TestPartitionHandleWithKeepOrder
+# https://github.com/pingcap/tidb/issues/44312
+drop table if exists t, t1;
+create table t (id int not null, store_id int not null )partition by range (store_id)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21));
+create table t1(id int not null, store_id int not null);
+insert into t values (1, 1);
+insert into t values (2, 17);
+insert into t1 values (0, 18);
+alter table t exchange partition p3 with table t1;
+alter table t add index idx(id);
+analyze table t;
+--sorted_result
+select *,_tidb_rowid from t use index(idx) order by id limit 2;
+drop table t, t1;
+create table t (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c))partition by range (b)(partition p0 values less than (6),partition p1 values less than (11),partition p2 values less than (16),partition p3 values less than (21));
+create table t1 (a int, b int, c int, key `idx_ac`(a, c), key `idx_bc`(b, c));
+insert into t values (1,2,3), (2,3,4), (3,4,5);
+insert into t1 values (1,18,3);
+alter table t exchange partition p3 with table t1;
+analyze table t;
+--sorted_result
+select * from t where a = 1 or b = 5 order by c limit 2;
+
+# TestOrderByOnHandle
+## indexLookUp + _tidb_rowid
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b) order by b, _tidb_rowid limit 10;
+analyze table t;
+select * from t use index(idx_b) order by b, _tidb_rowid limit 10;
+## indexLookUp + pkIsHandle
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,primary key(`a`),KEY `idx_b` (`b`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b) order by b, a limit 10;
+analyze table t;
+select * from t use index(idx_b) order by b, a limit 10;
+## indexMerge + _tidb_rowid
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;
+analyze table t;
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by _tidb_rowid limit 10;
+## indexMerge + pkIsHandle
+drop table if exists t;
+CREATE TABLE `t`(`a` int(11) NOT NULL,`b` int(11) DEFAULT NULL,`c` int(11) DEFAULT NULL,KEY `idx_b` (`b`),KEY `idx_c` (`c`),PRIMARY KEY (`a`)) PARTITION BY HASH (`a`) PARTITIONS 2;
+insert into t values (2,-1,3), (3,2,2), (1,1,1);
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;
+analyze table t;
+select * from t use index(idx_b, idx_c) where b = 1 or c = 2 order by a limit 10;
+
+# TestDynamicModeByDefault
+drop table if exists trange, thash;
+create table trange(a int, b int, primary key(a) clustered, index idx_b(b)) partition by range(a) (
+ partition p0 values less than(300),
+ partition p1 values less than(500),
+ partition p2 values less than(1100));
+create table thash(a int, b int, primary key(a) clustered, index idx_b(b)) partition by hash(a) partitions 4;
+analyze table thash, trange;
+explain format='brief' select * from trange where a>400;
+explain format='brief' select * from thash where a>=100;
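+## No prune mode is set in this test on purpose: it relies on dynamic pruning
+## being the default, so both EXPLAINs should show a single plan over the
+## pruned partitions rather than one plan per partition.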
+
+# TestAddDropPartitions
+drop table if exists t;
+set @@tidb_partition_prune_mode = 'dynamic';
+create table t(a int) partition by range(a) (
+ partition p0 values less than (5),
+ partition p1 values less than (10),
+ partition p2 values less than (15));
+insert into t values (2), (7), (12);
+analyze table t;
+explain format='brief' select * from t where a < 3;
+--sorted_result
+select * from t where a < 3;
+explain format='brief' select * from t where a < 8;
+--sorted_result
+select * from t where a < 8;
+explain format='brief' select * from t where a < 20;
+--sorted_result
+select * from t where a < 20;
+alter table t drop partition p0;
+explain format='brief' select * from t where a < 3;
+--sorted_result
+select * from t where a < 3;
+explain format='brief' select * from t where a < 8;
+--sorted_result
+select * from t where a < 8;
+explain format='brief' select * from t where a < 20;
+--sorted_result
+select * from t where a < 20;
+alter table t add partition (partition p3 values less than (20));
+alter table t add partition (partition p4 values less than (40));
+insert into t values (15), (25);
+explain format='brief' select * from t where a < 3;
+--sorted_result
+select * from t where a < 3;
+explain format='brief' select * from t where a < 8;
+--sorted_result
+select * from t where a < 8;
+explain format='brief' select * from t where a < 20;
+--sorted_result
+select * from t where a < 20;
+
+# TestPartitionPruningInTransaction
+drop table if exists t;
+create table t(a int, b int) partition by range(a) (partition p0 values less than(3), partition p1 values less than (5), partition p2 values less than(11));
+analyze table t;
+set @@tidb_partition_prune_mode = 'static';
+begin;
+explain format='brief' select * from t;
+--sorted_result
+select * from t;
+explain format='brief' select * from t where a > 3;
+--sorted_result
+select * from t where a > 3;
+explain format='brief' select * from t where a > 7;
+--sorted_result
+select * from t where a > 7;
+rollback;
+set @@tidb_partition_prune_mode = 'dynamic';
+begin;
+explain format='brief' select * from t;
+--sorted_result
+select * from t;
+explain format='brief' select * from t where a > 3;
+--sorted_result
+select * from t where a > 3;
+explain format='brief' select * from t where a > 7;
+--sorted_result
+select * from t where a > 7;
+rollback;
+set @@tidb_partition_prune_mode = default;
+
+# TestPartitionOnMissing
+drop table if exists tt1, tt2;
+set global tidb_partition_prune_mode='dynamic';
+set session tidb_partition_prune_mode='dynamic';
+CREATE TABLE tt1 (
+ id INT NOT NULL,
+ listid INT,
+ name varchar(10),
+ primary key (listid) clustered
+)
+PARTITION BY LIST (listid) (
+ PARTITION p1 VALUES IN (1),
+ PARTITION p2 VALUES IN (2),
+ PARTITION p3 VALUES IN (3),
+ PARTITION p4 VALUES IN (4)
+);
+CREATE TABLE tt2 (
+ id INT NOT NULL,
+ listid INT
+);
+create index idx_listid on tt1(id,listid);
+create index idx_listid on tt2(listid);
+insert into tt1 values(1,1,1);
+insert into tt1 values(2,2,2);
+insert into tt1 values(3,3,3);
+insert into tt1 values(4,4,4);
+insert into tt2 values(1,1);
+insert into tt2 values(2,2);
+insert into tt2 values(3,3);
+insert into tt2 values(4,4);
+insert into tt2 values(5,5);
+analyze table tt1;
+analyze table tt2;
+select /*+ inl_join(tt1)*/ count(*) from tt2
+ left join tt1 on tt1.listid=tt2.listid and tt1.id=tt2.id;
+select /*+ inl_join(tt1)*/ count(*) from tt2
+ left join tt1 on tt1.listid=tt2.listid;
+explain format = 'brief' select /*+ inl_join(tt1)*/ count(*) from tt2
+ left join tt1 on tt1.listid=tt2.listid;
+set global tidb_partition_prune_mode=default;
+set session tidb_partition_prune_mode=default;
a/tests/integrationtest/t/executor/prepared.test b/tests/integrationtest/t/executor/prepared.test index fe0868aee6662..273f1529ee73d 100644 --- a/tests/integrationtest/t/executor/prepared.test +++ b/tests/integrationtest/t/executor/prepared.test @@ -108,3 +108,125 @@ set @b = 11.11; execute stmt using @b; select * from t; set @@tidb_enable_prepared_plan_cache=default; + +# TestPreparedNameResolver +drop table if exists t; +create table t (id int, KEY id (id)); +prepare stmt from 'select * from t limit ? offset ?'; +-- error 1054 +prepare stmt from 'select b from t'; +-- error 1054 +prepare stmt from '(select * FROM t) union all (select * FROM t) order by a limit ?'; + +# TestPreparedDDL +# A 'create table' DDL statement should be accepted if it has no parameters. +drop table if exists t; +prepare stmt from 'create table t (id int, KEY id (id))'; + +# TestUnsupportedStmtForPrepare +# https://github.com/pingcap/tidb/issues/17412 +prepare stmt0 from "create table t0(a int primary key)"; +-- error 1295 +prepare stmt1 from "execute stmt0"; +-- error 1295 +prepare stmt2 from "deallocate prepare stmt0"; +-- error 1295 +prepare stmt4 from "prepare stmt3 from 'create table t1(a int, b int)'"; + +# TestIgnorePlanCache +drop table if exists t; +create table t (id int primary key, num int); +insert into t values (1, 1); +insert into t values (2, 2); +insert into t values (3, 3); +prepare stmt from 'select /*+ IGNORE_PLAN_CACHE() */ * from t where id=?'; +set @ignore_plan_doma = 1; +execute stmt using @ignore_plan_doma; +select @@last_plan_from_cache; + +# TestPreparedStmtWithHint +## https://github.com/pingcap/tidb/issues/18535 +prepare stmt from "select /*+ max_execution_time(10) */ sleep(3)"; +set @a=now(); +execute stmt; +select timediff(now(), @a) < 3; +set @a=now(); +select /*+ max_execution_time(10) */ sleep(3); +select timediff(now(), @a) < 3; + +## see https://github.com/pingcap/tidb/issues/46817 +drop table if exists t; +create table t (i int); +prepare stmt from 'with a as (select /*+ qb_name(qb1) */ * from t) select /*+ leading(@qb1)*/ * from a;'; + +# TestIssue28782 +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +prepare stmt from 'SELECT IF(?, 1, 0);'; +set @a=1, @b=null, @c=0; +execute stmt using @a; +execute stmt using @b; +## TODO(Reminiscent): Support caching more tableDual plans. +select @@last_plan_from_cache; +execute stmt using @c; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestIssue28087And28162 +set tidb_enable_prepared_plan_cache=1; +## issue 28087 +drop table if exists IDT_26207; +CREATE TABLE IDT_26207 (col1 bit(1)); +insert into IDT_26207 values(0x0), (0x1); +prepare stmt from 'select hex(t1.col1) from IDT_26207 as t1 left join IDT_26207 as t2 on t1.col1 = t2.col1 where t1.col1 in (?, ?, ?)'; +set @a=0x01, @b=0x01, @c=0x01; +execute stmt using @a,@b,@c; +set @a=0x00, @b=0x00, @c=0x01; +execute stmt using @a,@b,@c; +select @@last_plan_from_cache; + +## issue 28162 +drop table if exists IDT_MC21780; +CREATE TABLE IDT_MC21780 ( + COL1 timestamp NULL DEFAULT NULL, + COL2 timestamp NULL DEFAULT NULL, + COL3 timestamp NULL DEFAULT NULL, + KEY U_M_COL (COL1,COL2) +); +insert into IDT_MC21780 values("1970-12-18 10:53:28", "1970-12-18 10:53:28", "1970-12-18 10:53:28"); +prepare stmt from 'select/*+ hash_join(t1) */ * from IDT_MC21780 t1 join IDT_MC21780 t2 on t1.col1 = t2.col1 where t1.col1 < ? and t2.
col1 in (?, ?, ?);'; +set @a="2038-01-19 03:14:07", @b="2038-01-19 03:14:07", @c="2038-01-19 03:14:07", @d="2038-01-19 03:14:07"; +execute stmt using @a,@b,@c,@d; +set @a="1976-09-09 20:21:11", @b="2021-07-14 09:28:16", @c="1982-01-09 03:36:39", @d="1970-12-18 10:53:28"; +execute stmt using @a,@b,@c,@d; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; + +# TestTemporaryTable4PlanCache +set tidb_enable_prepared_plan_cache=1; +set @@tidb_enable_collect_execution_info=0; +drop table if exists tmp2; +create temporary table tmp2 (a int, b int, key(a), key(b)); +prepare stmt from 'select * from tmp2;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +drop table if exists tmp_t; +create global temporary table tmp_t (id int primary key, a int, b int, index(a)) on commit delete rows; +prepare stmt from 'select * from tmp_t;'; +execute stmt; +execute stmt; +select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_enable_collect_execution_info=default; + +# TestIssue31141 +set tidb_enable_prepared_plan_cache=1; +set @@tidb_txn_mode = 'pessimistic'; +prepare stmt1 from 'do 1'; +set @@tidb_txn_mode = 'optimistic'; +prepare stmt1 from 'do 1'; +set tidb_enable_prepared_plan_cache=default; +set @@tidb_txn_mode=default; + diff --git a/tests/integrationtest/t/executor/revoke.test b/tests/integrationtest/t/executor/revoke.test new file mode 100644 index 0000000000000..734f548a5d4e2 --- /dev/null +++ b/tests/integrationtest/t/executor/revoke.test @@ -0,0 +1,108 @@ +# TestRevokeTableSingle +# ref issue #38421 +drop user if exists test; +drop table if exists test1; +CREATE USER test; +CREATE TABLE executor__revoke.test1(c1 int); +GRANT SELECT ON executor__revoke.test1 TO test; +REVOKE SELECT ON executor__revoke.test1 from test; +SELECT Column_priv FROM mysql.tables_priv WHERE User="test" ; + +# TestRevokeTableSingleColumn +# ref issue #38421 (column fix) +drop user if exists test; +CREATE USER test; +GRANT SELECT(Host) ON mysql.db TO test; +GRANT SELECT(DB) ON mysql.db TO test; +REVOKE SELECT(Host) ON mysql.db FROM test; +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='Host' ; +SELECT count(Column_priv) FROM mysql.columns_priv WHERE User="test" and Column_name ='DB' ; + +# TestRevokeDynamicPrivs +DROP USER if exists dyn; +create user dyn; +GRANT BACKUP_Admin ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## try revoking only on executor__revoke.* - should fail: +-- error 3619 +REVOKE BACKUP_Admin,system_variables_admin ON executor__revoke.* FROM dyn; + +## privs should still be intact: +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +## with correct usage, the privilege is revoked +REVOKE BACKUP_Admin ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## Revoking a bogus privilege is only a warning in MySQL +REVOKE bogus ON *.* FROM dyn; +SHOW WARNINGS; + +## grant and revoke two dynamic privileges at once.
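+## (Both privileges are dynamic, so they are stored in mysql.global_grants
+## rather than mysql.user and are only valid at the *.* scope; a single
+## REVOKE statement should clear both rows at once.)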
+GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* TO dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## revoke a combination of dynamic + non-dynamic +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* TO dyn; +REVOKE BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT, INSERT ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +## revoke grant option from privileges +GRANT BACKUP_ADMIN, SYSTEM_VARIABLES_ADMIN, SELECT ON *.* TO dyn WITH GRANT OPTION; +REVOKE BACKUP_ADMIN, SELECT, GRANT OPTION ON *.* FROM dyn; +SELECT * FROM mysql.global_grants WHERE `Host` = '%' AND `User` = 'dyn' ORDER BY user,host,priv,with_grant_option; + +# TestRevokeOnNonExistTable +# issue #28533 +drop DATABASE if exists d1; +drop user if exists issue28533; +CREATE DATABASE d1; +USE d1; +CREATE TABLE t1 (a int); +CREATE USER issue28533; + +## GRANT ON existent table success +GRANT ALTER ON d1.t1 TO issue28533; + +## GRANT ON non-existent table success +GRANT INSERT, CREATE ON d1.t2 TO issue28533; + +## REVOKE ON non-existent table success +DROP TABLE t1; +REVOKE ALTER ON d1.t1 FROM issue28533; + +DROP USER issue28533; +DROP TABLE IF EXISTS t1; +DROP DATABASE IF EXISTS d1; +use executor__revoke; + +# TestIssue41773 +drop user if exists 't1234'@'%'; +create table if not exists xx (id int); +CREATE USER 't1234'@'%' IDENTIFIED BY 'sNGNQo12fEHe0n3vU'; +GRANT USAGE ON * TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.* TO 't1234'@'%'; +GRANT USAGE ON executor__revoke.xx TO 't1234'@'%'; +REVOKE USAGE ON * FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.* FROM 't1234'@'%'; +REVOKE USAGE ON executor__revoke.xx FROM 't1234'@'%'; + +# TestCaseInsensitiveSchemaNames +# Check https://github.com/pingcap/tidb/issues/41048 +drop table if exists TABLE_PRIV; +CREATE TABLE executor__revoke.TABLE_PRIV(id int, name varchar(20)); + +## Verify the case-insensitive updates for mysql.tables_priv table. +GRANT SELECT ON executor__revoke.table_priv TO 'root'@'%'; +revoke SELECT ON executor__revoke.TABLE_PRIV from 'root'@'%'; + +## Verify the case-insensitive updates for mysql.db table. +GRANT SELECT ON executor__revoke.* TO 'root'@'%'; +revoke SELECT ON executor__revoke.* from 'root'@'%'; + +## Verify the case-insensitive updates for mysql.columns_priv table. 
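+## (The GRANT and REVOKE statements below intentionally spell the table and
+## column names with different letter case, e.g. `id` vs `ID` and `TABLE_PRIV`
+## vs `taBle_priv`; the privilege records must still match, since these
+## identifiers are compared case-insensitively.)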
+GRANT SELECT (id), INSERT (ID, name) ON executor__revoke.TABLE_PRIV TO 'root'@'%'; +REVOKE SELECT (ID) ON executor__revoke.taBle_priv from 'root'@'%'; + diff --git a/tests/integrationtest/t/executor/sample.test b/tests/integrationtest/t/executor/sample.test new file mode 100644 index 0000000000000..723163b0d1d94 --- /dev/null +++ b/tests/integrationtest/t/executor/sample.test @@ -0,0 +1,125 @@ +set @@global.tidb_scatter_region=1; + +# TestTableSampleSchema +drop table if exists t; +set tidb_enable_clustered_index = on; +create table t (a varchar(255) primary key, b bigint); +insert into t values ('b', 100), ('y', 100); +split table t between ('a') and ('z') regions 2; +select a from t tablesample regions(); +drop table t; +create table t (a varchar(255), b int, c decimal, primary key (a, b, c)); +split table t between ('a', 0, 0) and ('z', 100, 100) regions 2; +insert into t values ('b', 10, 100), ('y', 100, 10); +select * from t tablesample regions(); +drop table t; +create table t (a bigint primary key, b int default 10); +split table t between (1) and (100000) regions 4; +insert into t(a) values (200), (25600), (50300), (99900), (99901); +select a from t tablesample regions(); +drop table t; +create table t (a bigint, b int default 10); +split table t between (0) and (100000) regions 4; +insert into t(a) values (1), (2), (3); +select a from t tablesample regions(); +set tidb_enable_clustered_index=default; + +# TestTableSampleInvalid +drop table if exists t; +create table t (a int, b varchar(255)); +insert into t values (1, 'abc'); +create view v as select * from t; +-- error 8128 +select * from v tablesample regions(); +-- error 8128 +select * from information_schema.tables tablesample regions(); +-- error 8128 +select a from t tablesample system(); +-- error 8128 +select a from t tablesample bernoulli(10 percent); +-- error 8128 +select a from t as t1 tablesample regions(), t as t2 tablesample system(); +-- error 8128 +select a from t tablesample (); + +# TestTableSampleWithTiDBRowID +drop table if exists t; +create table t (a int, b varchar(255)); +insert into t values (1, 'abc'); +select _tidb_rowid from t tablesample regions(); +select a, _tidb_rowid from t tablesample regions(); +select _tidb_rowid, b from t tablesample regions(); +select b, _tidb_rowid, a from t tablesample regions(); + +# TestTableSampleWithPartition +drop table if exists t; +create table t (a int, b varchar(255), primary key (a)) partition by hash(a) partitions 2; +insert into t values (1, '1'), (2, '2'), (3, '3'); +select count(*) from t tablesample regions(); +delete from t; +insert into t values (1, '1'); +select count(*) from t partition (p0) tablesample regions(); +select count(*) from t partition (p1) tablesample regions(); +## Test https://github.com/pingcap/tidb/issues/27349 +drop table if exists t; +create table t (a int, b int, unique key idx(a)) partition by range (a) ( + partition p0 values less than (0), + partition p1 values less than (10), + partition p2 values less than (30), + partition p3 values less than (maxvalue)); +insert into t values (2, 2), (31, 31), (12, 12); +select _tidb_rowid from t tablesample regions() order by _tidb_rowid; + +# TestTableSampleGeneratedColumns +drop table if exists t; +create table t (a int primary key, b int as (a + 1), c int as (b + 1), d int as (c + 1)); +split table t between (0) and (10000) regions 4; +insert into t(a) values (1), (2), (2999), (4999), (9999); +select a from t tablesample regions(); +select c from t tablesample regions(); +select a, b from t
tablesample regions(); +select d, c from t tablesample regions(); +select a, d from t tablesample regions(); + +# TestTableSampleUnionScanIgnorePendingKV +drop table if exists t; +create table t (a int primary key); +split table t between (0) and (40000) regions 4; +insert into t values (1), (1000), (10002); +select * from t tablesample regions(); +begin; +insert into t values (20006), (50000); +select * from t tablesample regions(); +delete from t where a = 1; +select * from t tablesample regions(); +commit; +select * from t tablesample regions(); + +# TestTableSampleTransactionConsistency +drop table if exists t; +create table t (a int primary key); +split table t between (0) and (40000) regions 4; +insert into t values (1), (1000), (10002); +begin; +select * from t tablesample regions(); + +connect (conn1, localhost, root,, executor__sample); +insert into t values (20006), (50000); +connection default; + +select * from t tablesample regions(); +commit; +select * from t tablesample regions(); +disconnect conn1; + +# TestTableSampleNotSupportedPlanWarning +drop table if exists t; +create table t (a int primary key, b int, c varchar(255)); +split table t between (0) and (10000) regions 5; +insert into t values (1000, 1, '1'), (1001, 1, '1'), (2100, 2, '2'), (4500, 3, '3'); +create index idx_0 on t (b); +select a from t tablesample regions() order by a; +select a from t use index (idx_0) tablesample regions() order by a; +show warnings; + +set @@global.tidb_scatter_region=default; diff --git a/tests/integrationtest/t/executor/set.test b/tests/integrationtest/t/executor/set.test new file mode 100644 index 0000000000000..112926aaf67d4 --- /dev/null +++ b/tests/integrationtest/t/executor/set.test @@ -0,0 +1,13 @@ +# TestMaxDeltaSchemaCount +set @@global.tidb_max_delta_schema_count= -1; +show warnings; +select @@global.tidb_max_delta_schema_count; +set @@global.tidb_max_delta_schema_count= 9223372036854775807; +show warnings; +select @@global.tidb_max_delta_schema_count; +-- error 1232 +set @@global.tidb_max_delta_schema_count= invalid_val; +set @@global.tidb_max_delta_schema_count= 2048; +select @@global.tidb_max_delta_schema_count; +set @@global.tidb_max_delta_schema_count= default; + diff --git a/tests/integrationtest/t/executor/show.test b/tests/integrationtest/t/executor/show.test index 826e451747667..ef88ad76614eb 100644 --- a/tests/integrationtest/t/executor/show.test +++ b/tests/integrationtest/t/executor/show.test @@ -62,3 +62,368 @@ show create table t; drop table if exists t; create table t (created_at datetime) TTL = created_at + INTERVAL 100 YEAR TTL_JOB_INTERVAL = '1d'; show create table t; + +# TestShowHistogramsInFlight +show histograms_in_flight; + +# TestShowOpenTables +show open tables; +show open tables in executor__show; + +# TestShowCreateViewDefiner +create or replace view v1 as select 1; +show create view v1; +drop view v1; + +# TestShowCreateTable +drop database if exists test1; +drop database if exists test2; +drop table if exists t, t1; +create table t1(a int,b int); +drop view if exists v1; +create or replace definer=`root`@`127.0.0.1` view v1 as select * from t1; +show create table v1; +show create view v1; +drop view v1; +drop table t1; +drop view if exists v; +create or replace definer=`root`@`127.0.0.1` view v as select JSON_MERGE('{}', '{}') as col; +show create view v; +drop view if exists v; +drop table if exists t1; +create table t1(a int,b int); +create or replace definer=`root`@`127.0.0.1` view v1 as select avg(a),t1.* from t1 group by a; +show create view 
v1; +drop view v1; +create or replace definer=`root`@`127.0.0.1` view v1 as select a+b, t1.* , a as c from t1; +show create view v1; +drop table t1; +drop view v1; +create table t(c int, b int as (c + 1))ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +show create table `t`; +drop table t; +create table t(c int, b int as (c + 1) not null)ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +show create table `t`; +drop table t; +create table t ( a char(10) charset utf8 collate utf8_bin, b char(10) as (rtrim(a))); +show create table `t`; +drop table t; +drop table if exists different_charset; +create table different_charset(ch1 varchar(10) charset utf8, ch2 varchar(10) charset binary); +show create table different_charset; +drop table if exists t; +create table `t` ( +`a` timestamp not null default current_timestamp, +`b` timestamp(3) default current_timestamp(3), +`c` datetime default current_timestamp, +`d` datetime(4) default current_timestamp(4), +`e` varchar(20) default 'cUrrent_tImestamp', +`f` datetime(2) default current_timestamp(2) on update current_timestamp(2), +`g` timestamp(2) default current_timestamp(2) on update current_timestamp(2), +`h` date default current_date ); +show create table `t`; +drop table t; +create table t (a int, b int) shard_row_id_bits = 4 pre_split_regions=3; +show create table `t`; +drop table t; +drop table if exists t1; +create table t1(c int unsigned default 0); +show create table `t1`; +drop table t1; +CREATE TABLE `log` (`LOG_ID` bigint(20) UNSIGNED NOT NULL AUTO_INCREMENT,`ROUND_ID` bigint(20) UNSIGNED NOT NULL,`USER_ID` int(10) UNSIGNED NOT NULL,`USER_IP` int(10) UNSIGNED DEFAULT NULL,`END_TIME` datetime NOT NULL,`USER_TYPE` int(11) DEFAULT NULL,`APP_ID` int(11) DEFAULT NULL,PRIMARY KEY (`LOG_ID`,`END_TIME`) NONCLUSTERED,KEY `IDX_EndTime` (`END_TIME`),KEY `IDX_RoundId` (`ROUND_ID`),KEY `IDX_UserId_EndTime` (`USER_ID`,`END_TIME`)) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin AUTO_INCREMENT=505488 PARTITION BY RANGE ( month(`end_time`) ) (PARTITION `p1` VALUES LESS THAN (2),PARTITION `p2` VALUES LESS THAN (3),PARTITION `p3` VALUES LESS THAN (4),PARTITION `p4` VALUES LESS THAN (5),PARTITION `p5` VALUES LESS THAN (6),PARTITION `p6` VALUES LESS THAN (7),PARTITION `p7` VALUES LESS THAN (8),PARTITION `p8` VALUES LESS THAN (9),PARTITION `p9` VALUES LESS THAN (10),PARTITION `p10` VALUES LESS THAN (11),PARTITION `p11` VALUES LESS THAN (12),PARTITION `p12` VALUES LESS THAN (MAXVALUE)); +show create table log; +create table ttt4(a varchar(123) default null collate utf8mb4_unicode_ci)engine=innodb default charset=utf8mb4 collate=utf8mb4_unicode_ci; +show create table `ttt4`; +create table ttt5(a varchar(123) default null)engine=innodb default charset=utf8mb4 collate=utf8mb4_bin; +show create table `ttt5`; +drop table if exists t; +create table t(a int, b real); +alter table t add index expr_idx((a*b+1)); +show create table t; +drop sequence if exists seq; +create sequence seq; +show create table seq; +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default collate=binary; +show create table binary_collate; +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default charset=binary collate=binary; +show create table binary_collate; +drop table if exists binary_collate; +create table binary_collate(a varchar(10)) default charset=utf8mb4 collate=utf8mb4_bin; +show create table binary_collate; +drop table if exists default_num; +create table default_num(a int default 11); +show 
create table default_num; +drop table if exists default_varchar; +create table default_varchar(a varchar(10) default "haha"); +show create table default_varchar; +drop table if exists default_sequence; +create table default_sequence(a int default nextval(seq)); +show create table default_sequence; +set @@foreign_key_checks=0; +DROP TABLE IF EXISTS parent, child; +CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id)); +CREATE TABLE parent ( id INT NOT NULL PRIMARY KEY auto_increment ); +show create table child; +DROP TABLE child; +CREATE TABLE child (id INT NOT NULL PRIMARY KEY auto_increment, parent_id INT NOT NULL, INDEX par_ind (parent_id), CONSTRAINT child_ibfk_1 FOREIGN KEY (parent_id) REFERENCES parent(id) ON DELETE RESTRICT ON UPDATE CASCADE); +show create table child; +create database test1; +create database test2; +create table test1.t1 (id int key, b int, index(b)); +create table test2.t2 (id int key, b int, foreign key fk(b) references test1.t1(id)); +show create table test2.t2; +drop table if exists t; +create table t(a int, b char(10) as ('a')); +show create table t; +drop table if exists t; +create table t(a int, b char(10) as (_utf8'a')); +show create table t; +set @@session.tidb_enable_list_partition = ON; +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id)) partition by list (id) ( + partition p0 values in (3,5,6,9,17), + partition p1 values in (1,2,10,11,19,20), + partition p2 values in (4,12,13,14,18), + partition p3 values in (7,8,15,16,null) + ); +show create table t; +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id)) partition by list columns (id) ( + partition p0 values in (3,5,6,9,17), + partition p1 values in (1,2,10,11,19,20), + partition p2 values in (4,12,13,14,18), + partition p3 values in (7,8,15,16,null) + ); +show create table t; +DROP TABLE IF EXISTS t; +create table t (id int, name varchar(10), unique index idx (id, name)) partition by list columns (id, name) ( + partition p0 values in ((3, '1'), (5, '5')), + partition p1 values in ((1, '1'))); +show create table t; +DROP TABLE IF EXISTS t; +create table t (id int primary key, v varchar(255) not null, key idx_v (v) comment 'foo\'bar'); +show create table t; +CREATE TABLE `thash` ( + `id` bigint unsigned NOT NULL, + `data` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) +) +PARTITION BY HASH (`id`) +(PARTITION pEven COMMENT = "Even ids", + PARTITION pOdd COMMENT = "Odd ids"); +show create table `thash`; +drop table if exists `thash`; +CREATE TABLE `thash` ( + `id` bigint unsigned NOT NULL, + `data` varchar(255) DEFAULT NULL, + PRIMARY KEY (`id`) +) +PARTITION BY HASH (`id`); +show create table `thash`; +drop table if exists t; +create table t(a int primary key, b varchar(20) default '\\'); +show create table t; +drop table if exists t; +create table t(a set('a', 'b') charset binary,b enum('a', 'b') charset ascii); +show create table t; +drop table if exists t; +create table t(a bit default (rand())); +show create table t; +drop table if exists t; +-- error 1654 +create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0xff)); +create table t (a varchar(255) character set ascii) partition by range columns (a) (partition p values less than (0x7f)); +show create table t; +set @@session.tidb_enable_list_partition = default; +set 
@@foreign_key_checks=default; + +# TestShowErrors +create table if not exists show_errors (a int); +-- error 1050 +create table show_errors (a int); +show errors; +select 1; +-- error 1064 +create invalid; +show errors; + +# TestIssue3641 +connect (conn1, localhost, root,,); +-- error 1046 +show tables; +-- error 1046 +show tables; +connection default; +disconnect conn1; + +# TestShowSlow +# The test result is volatile, because +# 1. Slow queries are stored in the domain, which may be affected by other tests. +# 2. Collecting slow queries is an asynchronous process, so checking immediately may not get the expected result. +# 3. Making a slow query like "select sleep(1)" would slow down the CI. +# So, we just cover the code but do not check the result. +--disable_result_log +admin show slow recent 3; +admin show slow top 3; +admin show slow top internal 3; +admin show slow top all 3; +--enable_result_log + +# TestShowCreateStmtIgnoreLocalTemporaryTables +drop table if exists v1; +drop view if exists v1; +drop sequence if exists seq1; +drop table if exists seq1; + +## SHOW CREATE VIEW ignores local temporary table with the same name +create view v1 as select 1; +create temporary table v1 (a int); +show create table v1; +drop view v1; +-- error 1146 +show create view v1; + +## SHOW CREATE SEQUENCE ignores local temporary table with the same name +drop view if exists seq1; +create sequence seq1; +create temporary table seq1 (a int); +show create sequence seq1; +drop sequence seq1; +-- error 1146 +show create sequence seq1; + +# TestShowBuiltin +show builtins; + +# TestShowPerformanceSchema +# for Issue 19231 +# Ideally we should create a new performance_schema table here with indices that we run the tests on. +# However, it's not possible to create a new performance_schema table since it's a special in-memory table. +# Instead, the test below uses the default index on the table. +SHOW INDEX FROM performance_schema.events_statements_summary_by_digest; + +# TestShowTemporaryTable +drop table if exists t1, t3, t4, t5, t6, t7; +create global temporary table t1 (id int) on commit delete rows; +create global temporary table t3 (i int primary key, j int) on commit delete rows; + +## For issue https://github.com/pingcap/tidb/issues/24752 +show create table t1; + +## No panic, fix issue https://github.com/pingcap/tidb/issues/24788 +show create table t3; + +## Verify that the `show create table` result can be used to build the table. +CREATE GLOBAL TEMPORARY TABLE `t4` ( + `i` int(11) NOT NULL, + `j` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) /*T![clustered_index] CLUSTERED */ +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin ON COMMIT DELETE ROWS; + +## Cover auto increment column.
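+## (SHOW CREATE TABLE should report the AUTO_INCREMENT attribute and the
+## ON COMMIT DELETE ROWS clause unchanged for a global temporary table.)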
+CREATE GLOBAL TEMPORARY TABLE t5 ( + id int(11) NOT NULL AUTO_INCREMENT, + b int(11) NOT NULL, + pad varbinary(255) DEFAULT NULL, + PRIMARY KEY (id), + KEY b (b)) ON COMMIT DELETE ROWS; +show create table t5; +create temporary table t6 (i int primary key, j int); +show create table t6; +create temporary table t7 (i int primary key auto_increment, j int); +begin; +insert into t7 (j) values (14); +insert into t7 (j) values (24); +select * from t7; +show create table t7; +commit; + +# TestShowCachedTable +drop table if exists t1; +create table t1 (id int); +alter table t1 cache; +show create table t1; +select create_options from information_schema.tables where table_schema = 'executor__show' and table_name = 't1'; +alter table t1 nocache; +show create table t1; +select create_options from information_schema.tables where table_schema = 'executor__show' and table_name = 't1'; + +# TestShowDatabasesLike +DROP DATABASE IF EXISTS `TEST_$1`; +DROP DATABASE IF EXISTS `test_$2`; +CREATE DATABASE `TEST_$1`; +CREATE DATABASE `test_$2`; +SHOW DATABASES LIKE 'TEST_$%'; +SHOW DATABASES LIKE 'test_$%'; + +# TestShowCollationsLike +SHOW COLLATION LIKE 'UTF8MB4_BI%'; +SHOW COLLATION LIKE 'utf8mb4_bi%'; + +# TestShowDatabasesInfoSchemaFirst +drop user if exists 'show'; +drop database if exists AAAA; +drop database if exists BBBB; +create user 'show'@'%'; + +connect (conn1, localhost, show,,); +show databases; + +connection default; +create database AAAA; +create database BBBB; +grant select on AAAA.* to 'show'@'%'; +grant select on BBBB.* to 'show'@'%'; + +connection conn1; +show databases; + +connection default; +disconnect conn1; +drop user 'show'@'%'; +drop database AAAA; +drop database BBBB; + +# TestShowTableStatusLike +DROP table IF EXISTS `T1`; +CREATE table `T1` (a int); +## ignore create time +-- replace_column 12 1 +SHOW table status LIKE 't1'; +DROP table IF EXISTS `Li_1`; +DROP table IF EXISTS `li_2`; +CREATE table `Li_1` (a int); +CREATE table `li_2` (a int); +-- replace_column 12 1 +SHOW table status LIKE 'li%'; + +# TestShowPasswordVariable +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = ''; +show variables like 'authentication_ldap_sasl_bind_root_pwd'; +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'; +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = password; +show variables like 'authentication_ldap_sasl_bind_root_pwd'; +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_sasl_bind_root_pwd'; +SET GLOBAL authentication_ldap_simple_bind_root_pwd = ''; +show variables like 'authentication_ldap_simple_bind_root_pwd'; +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_simple_bind_root_pwd'; +SET GLOBAL authentication_ldap_simple_bind_root_pwd = password; +show variables like 'authentication_ldap_simple_bind_root_pwd'; +SELECT current_value FROM information_schema.variables_info WHERE VARIABLE_NAME LIKE 'authentication_ldap_simple_bind_root_pwd'; +SET GLOBAL authentication_ldap_simple_bind_root_pwd = default; +SET GLOBAL authentication_ldap_sasl_bind_root_pwd = default; + +# TestShowForNewCollations +show collation; +select * from information_schema.COLLATIONS; +show character set like '%utf8mb4%'; +select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'; +set @@session.default_collation_for_utf8mb4='utf8mb4_0900_ai_ci'; +show variables like 
'default_collation_for_utf8mb4'; +show collation; +select * from information_schema.COLLATIONS; +show character set like '%utf8mb4%'; +select * from information_schema.COLLATIONS where IS_DEFAULT='Yes' and CHARACTER_SET_NAME='utf8mb4'; +set @@session.default_collation_for_utf8mb4=default; diff --git a/tests/integrationtest/t/executor/simple.test b/tests/integrationtest/t/executor/simple.test new file mode 100644 index 0000000000000..b1f99510e2b9b --- /dev/null +++ b/tests/integrationtest/t/executor/simple.test @@ -0,0 +1,455 @@ +# TestFlushTables +FLUSH TABLES; +-- error 1105 +FLUSH TABLES WITH READ LOCK; + +# TestUseDB +USE test; +-- error 1046 +USE ``; +use executor__simple; + +# TestIssue9111 +# CREATE USER / DROP USER fails if admin doesn't have insert privilege on `mysql.user` table. +drop user if exists 'user_admin'@'localhost'; +drop user if exists test_create_user; +create user 'user_admin'@'localhost'; +grant create user on *.* to 'user_admin'@'localhost'; + +connect (conn1, localhost, user_admin,,); +create user test_create_user; +drop user test_create_user; +connection default; + +revoke create user on *.* from 'user_admin'@'localhost'; +grant insert, delete on mysql.user to 'user_admin'@'localhost'; + +connection conn1; +create user test_create_user; +drop user test_create_user; +create role test_create_user; +drop role test_create_user; +connection default; + +drop user 'user_admin'@'localhost'; +disconnect conn1; + +# TestRoleAtomic +drop role if exists r1, r2, r3; +create role r2; +-- error 1396 +create role r1, r2, r3; +SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3'); +-- error 1396 +drop role r1, r2, r3; +SELECT user FROM mysql.User WHERE user in ('r1', 'r2', 'r3'); +drop role r2; + +# TestIssue23649 +# See https://github.com/pingcap/tidb/issues/23649 +DROP USER IF EXISTS issue23649; +CREATE USER issue23649; +-- error 3523 +GRANT bogusrole to issue23649; +-- error 3523 +GRANT bogusrole to nonexisting; + +# TestSetCurrentUserPwd +drop user if exists issue28534; +CREATE USER issue28534; +connect (conn1, localhost, issue28534,,); +SET PASSWORD FOR CURRENT_USER() = "43582eussi"; +connection default; +SELECT authentication_string FROM mysql.User WHERE User="issue28534"; +DROP USER IF EXISTS issue28534; +disconnect conn1; + +# TestShowGrantsAfterDropRole +drop user if exists u29473; +drop role if exists r29473; +CREATE USER u29473; +CREATE ROLE r29473; +GRANT r29473 TO u29473; +GRANT CREATE USER ON *.* TO u29473; +connect (conn1, localhost, u29473,,); +SET ROLE r29473; +DROP ROLE r29473; +SHOW GRANTS; +connection default; +disconnect conn1; +DROP USER IF EXISTS u29473; + +# TestPrivilegesAfterDropUser +drop table if exists t1; +drop user if exists u1; +create table t1(id int, v int); +CREATE USER u1 require ssl; +GRANT CREATE ON executor__simple.* TO u1; +GRANT UPDATE ON executor__simple.t1 TO u1; +GRANT SYSTEM_VARIABLES_ADMIN ON *.* TO u1; +GRANT SELECT(v), UPDATE(v) on executor__simple.t1 TO u1; +SELECT COUNT(1) FROM mysql.global_grants WHERE USER='u1' AND HOST='%'; +SELECT COUNT(1) FROM mysql.global_priv WHERE USER='u1' AND HOST='%'; +SELECT COUNT(1) FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'; +SELECT COUNT(1) FROM mysql.columns_priv WHERE USER='u1' AND HOST='%'; +SHOW GRANTS FOR u1; +DROP USER u1; +-- error 1141 +SHOW GRANTS FOR u1; +SELECT * FROM mysql.global_grants WHERE USER='u1' AND HOST='%'; +SELECT * FROM mysql.global_priv WHERE USER='u1' AND HOST='%'; +SELECT * FROM mysql.tables_priv WHERE USER='u1' AND HOST='%'; +SELECT * FROM mysql.columns_priv 
WHERE USER='u1' AND HOST='%'; +DROP USER IF EXISTS u1; +drop table t1; + +# TestDropRoleAfterRevoke +# issue 29781 +drop role if exists r1,r2,r3; +create role r1, r2, r3; +grant r1,r2,r3 to current_user(); +set role all; +revoke r1, r3 from root; +drop role r1; +drop role if exists r1, r2, r3; + +# TestStatementsCauseImplicitCommit +# Test some of the implicit commit statements. +# See https://dev.mysql.com/doc/refman/5.7/en/implicit-commit.html +drop table if exists ic, xx; +create table ic (id int primary key); +begin; +insert into ic values (0); +create table xx (id int); +select * from ic where id = 0; +delete from ic; +rollback; +begin; +insert into ic values (1); +create user 'xx'@'127.0.0.1'; +select * from ic where id = 1; +delete from ic; +rollback; +begin; +insert into ic values (2); +grant SELECT on executor__simple.ic to 'xx'@'127.0.0.1'; +select * from ic where id = 2; +delete from ic; +rollback; +begin; +insert into ic values (3); +flush privileges; +select * from ic where id = 3; +delete from ic; +rollback; +begin; +insert into ic values (4); +analyze table ic; +select * from ic where id = 4; +delete from ic; +rollback; + +# TestDoWithAggFunc +DO sum(1); +DO avg(@e+@f); +DO GROUP_CONCAT(NULLIF(ELT(1, @e), 2.0) ORDER BY 1); + +# TestSetDefaultRoleAll +drop user if exists test_all; +create user test_all; +connect (conn1, localhost, test_all,,); +set default role all to test_all; +connection default; +disconnect conn1; + +# TestFlushPrivileges +drop user if exists 'testflush'@'localhost'; +CREATE USER 'testflush'@'localhost' IDENTIFIED BY ''; +UPDATE mysql.User SET Select_priv='Y' WHERE User="testflush" and Host="localhost"; +connect (conn1, localhost, testflush,,); +--error 1142 +SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"; +connection default; +FLUSH PRIVILEGES; +connection conn1; +SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"; +connection default; +disconnect conn1; + +# TestCreateUserWithLDAP +drop user if exists 'bob'@'localhost'; +drop user if exists 'bob2'@'localhost'; +CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +CREATE USER 'bob2'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob2,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob2'; + +# TestAlterUserWithLDAP +drop user if exists 'bob'@'localhost'; +# case 1: alter from an LDAP user to an LDAP user +CREATE USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_simple AS 'uid=bob,ou=People,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'; +SELECT Host, User, authentication_string, plugin FROM mysql.User WHERE User = 'bob'; +# case 2: should ignore the password history +ALTER USER 'bob'@'localhost' PASSWORD HISTORY 5; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=Manager,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS 'uid=bob,ou=People,dc=example,dc=com'; +ALTER USER 'bob'@'localhost' IDENTIFIED WITH authentication_ldap_sasl AS
'uid=bob,ou=Manager,dc=example,dc=com'; + +# TestIssue44098 +drop user if exists u1, u2, u3, u4, u5; +set global validate_password.enable = 1; +create user u1 identified with 'tidb_auth_token'; +create user u2 identified with 'auth_socket'; +create user u3 identified with 'authentication_ldap_simple'; +create user u4 identified with 'authentication_ldap_sasl'; +-- error 1819 +create user u5 identified with 'mysql_native_password'; +-- error 1819 +create user u5 identified with 'caching_sha2_password'; +-- error 1819 +create user u5 identified with 'tidb_sm3_password'; +-- error 1524 +create user u5 identified with 'mysql_clear_password'; +-- error 1524 +create user u5 identified with 'tidb_session_token'; +set global validate_password.enable = default; + +# TestIssue33144 +drop role if exists 'r1'; +create role 'r1' ; +grant 'r1' to current_user(); +revoke 'r1' from current_user(); +grant 'r1' to current_user(),current_user(); +revoke 'r1' from current_user(),current_user(); +drop role 'r1' ; + +# TestRoleAdmin +drop role if exists 'targetRole'; +drop user if exists 'testRoleAdmin'; +CREATE USER 'testRoleAdmin'; +CREATE ROLE 'targetRole'; +connect (conn1, localhost, testRoleAdmin,,); +--error 1227 +GRANT `targetRole` TO `testRoleAdmin`; +connection default; +GRANT SUPER ON *.* TO `testRoleAdmin`; +connection conn1; +GRANT `targetRole` TO `testRoleAdmin`; +REVOKE `targetRole` FROM `testRoleAdmin`; +connection default; +DROP USER 'testRoleAdmin'; +DROP ROLE 'targetRole'; +disconnect conn1; + +# TestDefaultRole +drop role if exists r_1, r_2, r_3, u_1; +CREATE ROLE r_1, r_2, r_3, u_1; +insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_1','%','u_1'); +insert into mysql.role_edges (FROM_HOST,FROM_USER,TO_HOST,TO_USER) values ('%','r_2','%','u_1'); +flush privileges; +-- error 3530 +SET DEFAULT ROLE r_3 TO u_1; +-- error 1396 +SET DEFAULT ROLE r_1 TO u_1000; +-- error 3530 +SET DEFAULT ROLE r_1, r_3 TO u_1; +SET DEFAULT ROLE r_1 TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +SET DEFAULT ROLE r_2 TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +SET DEFAULT ROLE ALL TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +SET DEFAULT ROLE NONE TO u_1; +SELECT DEFAULT_ROLE_USER FROM mysql.default_roles WHERE USER="u_1"; +DROP USER r_1, r_2, r_3, u_1; + +# TestIssue17247 +drop user if exists 'issue17247'; +create user 'issue17247'; +grant CREATE USER on *.* to 'issue17247'; +connect (conn1, localhost, issue17247,,); +ALTER USER USER() IDENTIFIED BY 'xxx'; +ALTER USER CURRENT_USER() IDENTIFIED BY 'yyy'; +ALTER USER CURRENT_USER IDENTIFIED BY 'zzz'; +connection default; +ALTER USER 'issue17247'@'%' IDENTIFIED BY 'kkk'; +ALTER USER 'issue17247'@'%' IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'; +connection conn1; +-- error 1064 +ALTER USER USER() IDENTIFIED BY PASSWORD '*B50FBDB37F1256824274912F2A1CE648082C3F1F'; +connection default; +disconnect conn1; + +# TestDo +drop table if exists t; +do 1, @a:=1; +select @a; +create table t (i int); +insert into t values (1); +select * from t; +do @a := (select * from t where i = 1); +connect (conn1, localhost, root,, executor__simple); +insert into t values (2); +connection default; +disconnect conn1; +select * from t; + +# TestSetRoleAllCorner +# For a user with no role, `SET ROLE ALL` should activate +# an empty slice, rather than nil.
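+## (The point of the test: both statements below should succeed even though
+## the user has been granted no roles at all.)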
+drop user if exists set_role_all; +create user set_role_all; +connect (conn1, localhost, set_role_all,,); +set role all; +select current_role; +connection default; +disconnect conn1; + +# TestCreateRole +drop user if exists testCreateRole; +drop role if exists test_create_role; +create user testCreateRole; +grant CREATE USER on *.* to testCreateRole; + +connect (conn1, localhost, testCreateRole,,); +create role test_create_role; + +connection default; +revoke CREATE USER on *.* from testCreateRole; +grant CREATE ROLE on *.* to testCreateRole; +drop role test_create_role; + +connection conn1; +create role test_create_role; + +connection default; +drop role test_create_role; + +connection conn1; +--error 1227 +create user test_create_role; + +connection default; +drop user testCreateRole; +disconnect conn1; + +# TestDropRole +drop user if exists testCreateRole; +drop role if exists test_create_role; +create user testCreateRole; +create user test_create_role; +grant CREATE USER on *.* to testCreateRole; + +connect (conn1, localhost, testCreateRole,,); +drop role test_create_role; + +connection default; +revoke CREATE USER on *.* from testCreateRole; +create role test_create_role; +grant DROP ROLE on *.* to testCreateRole; + +connection conn1; +drop role test_create_role; + +connection default; +create user test_create_role; + +connection conn1; +--error 1227 +drop user test_create_role; + +connection default; +drop user testCreateRole; +disconnect conn1; + +# TestSetResourceGroup +SET GLOBAL tidb_enable_resource_control='on'; +drop RESOURCE GROUP if exists rg1; +drop RESOURCE GROUP if exists rg2; +drop user if exists user1; +-- error 8249 +SET RESOURCE GROUP rg1; +CREATE RESOURCE GROUP rg1 ru_per_sec = 100; + +create user user1; +ALTER USER `user1` RESOURCE GROUP `rg1`; +SELECT CURRENT_RESOURCE_GROUP(); + +connect(conn1, localhost, user1,,); +SELECT CURRENT_RESOURCE_GROUP(); + +connection default; +SELECT CURRENT_RESOURCE_GROUP(); +CREATE RESOURCE GROUP rg2 ru_per_sec = 200; +SET RESOURCE GROUP `rg2`; +SELECT CURRENT_RESOURCE_GROUP(); +SET RESOURCE GROUP ``; +SELECT CURRENT_RESOURCE_GROUP(); +SET RESOURCE GROUP default; +SELECT CURRENT_RESOURCE_GROUP(); + +connection conn1; +SELECT CURRENT_RESOURCE_GROUP(); + +connection default; +disconnect conn1; +drop user user1; +SET GLOBAL tidb_enable_resource_control=default; + +# TestUserAttributes +drop user if exists testuser; +drop user if exists testuser1; +drop user if exists testuser2; + +## https://dev.mysql.com/doc/refman/8.0/en/create-user.html#create-user-comments-attributes +CREATE USER testuser COMMENT '1234'; +CREATE USER testuser1 ATTRIBUTE '{"name": "Tom", "age": 19}'; +-- error 3140 +CREATE USER testuser2 ATTRIBUTE '{"name": "Tom", age: 19}'; +CREATE USER testuser2; +SELECT user_attributes FROM mysql.user WHERE user = 'testuser'; +SELECT user_attributes FROM mysql.user WHERE user = 'testuser1'; +SELECT user_attributes FROM mysql.user WHERE user = 'testuser2'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +SELECT attribute->>"$.age" AS age, attribute->>"$.name" AS name FROM information_schema.user_attributes WHERE user = 'testuser1'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser2'; + +## https://dev.mysql.com/doc/refman/8.0/en/alter-user.html#alter-user-comments-attributes +ALTER USER testuser1 ATTRIBUTE '{"age": 20, "sex": "male"}'; +SELECT attribute FROM 
information_schema.user_attributes WHERE user = 'testuser1'; +ALTER USER testuser1 ATTRIBUTE '{"hobby": "soccer"}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +ALTER USER testuser1 ATTRIBUTE '{"sex": null, "hobby": null}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +ALTER USER testuser1 COMMENT '5678'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +ALTER USER testuser1 COMMENT ''; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; +ALTER USER testuser1 ATTRIBUTE '{"comment": null}'; +SELECT attribute FROM information_schema.user_attributes WHERE user = 'testuser1'; + +## Non-root users could access COMMENT or ATTRIBUTE of all users via the view, +## but not via the mysql.user table. +connect (conn1, localhost, testuser1,,); +SELECT user, host, attribute FROM information_schema.user_attributes where user in ('testuser', 'testuser1', 'testuser2') ORDER BY user; +-- error 1142 +SELECT user, host, user_attributes FROM mysql.user ORDER BY user; + +## https://github.com/pingcap/tidb/issues/39207 +connection default; +create user usr1@'%' identified by 'passord'; +alter user usr1 comment 'comment1'; +select user_attributes from mysql.user where user = 'usr1'; +set global tidb_enable_resource_control = 'on'; +drop RESOURCE group if exists rg1; +CREATE RESOURCE GROUP rg1 ru_per_sec = 100; +alter user usr1 resource group rg1; +select user_attributes from mysql.user where user = 'usr1'; + +set global tidb_enable_resource_control = default; +disconnect conn1; diff --git a/tests/integrationtest/t/executor/split_table.test b/tests/integrationtest/t/executor/split_table.test index b21af7384674d..fa8981076f3ce 100644 --- a/tests/integrationtest/t/executor/split_table.test +++ b/tests/integrationtest/t/executor/split_table.test @@ -7,3 +7,140 @@ create table t(a int(20) auto_increment primary key); -- error 1690 split table t between (-9223372036854775808) and (9223372036854775807) regions 16; drop table if exists t; + +# TestSplitTableRegion +drop table if exists t, t1; +create table t(a varchar(100),b int, index idx1(b,a)); +split table t index idx1 by (10000,"abcd"),(10000000); +-- error 1265 +split table t index idx1 by ("abcd"); + +## Test for split index region. +## Check min value is more than max value. +split table t index idx1 between (0) and (1000000000) regions 10; +-- error 8212 +split table t index idx1 between (2,'a') and (1,'c') regions 10; + +## Check min value is invalid. +-- error 1105 +split table t index idx1 between () and (1) regions 10; + +## Check max value is invalid. +-- error 1105 +split table t index idx1 between (1) and () regions 10; + +## Check pre-split region num is too large. +-- error 1105 +split table t index idx1 between (0) and (1000000000) regions 10000; + +## Check pre-split region num 0 is invalid. +-- error 1105 +split table t index idx1 between (0) and (1000000000) regions 0; + +## Test truncate error msg. +-- error 1265 +split table t index idx1 between ("aa") and (1000000000) regions 0; + +## Test for split table region. +split table t between (0) and (1000000000) regions 10; + +## Check the lower value is more than the upper value. +-- error 8212 +split table t between (2) and (1) regions 10; + +## Check the lower value is invalid. +-- error 1105 +split table t between () and (1) regions 10; + +## Check upper value is invalid. 
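+## (Symmetric to the empty lower bound above: an empty value list gives the
+## splitter nothing to build the upper region boundary from.)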
+-- error 1105 +split table t between (1) and () regions 10; + +## Check pre-split region num is too large. +-- error 1105 +split table t between (0) and (1000000000) regions 10000; + +## Check pre-split region num 0 is invalid. +-- error 1105 +split table t between (0) and (1000000000) regions 0; + +## Test truncate error msg. +-- error 1265 +split table t between ("aa") and (1000000000) regions 10; + +## Test split table region step is too small. +-- error 8212 +split table t between (0) and (100) regions 10; + +## Test split region by syntax. +split table t by (0),(1000),(1000000); + +## Test split region twice to test for multiple batch split region requests. +create table t1(a int, b int); +split table t1 between(0) and (10000) regions 10; +split table t1 between(10) and (10010) regions 5; + +## Test split region for partition table. +drop table if exists t; +create table t (a int,b int) partition by hash(a) partitions 5; +split table t between (0) and (1000000) regions 5; + +## Test for `split for region` syntax. +split region for partition table t between (1000000) and (100000000) regions 10; + +## Test split region for partition table with specified partition. +split table t partition (p1,p2) between (100000000) and (1000000000) regions 5; + +## Test for `split for region` syntax. +split region for partition table t partition (p3,p4) between (100000000) and (1000000000) regions 5; + +# TestClusterIndexSplitTableIntegration +set tidb_enable_clustered_index=ON; +drop table if exists t; +create table t (a varchar(255), b double, c int, primary key (a, b)); +## Value list length not match. +-- error 1105 +split table t between ('aaa') and ('aaa', 100.0) regions 10; +-- error 1105 +split table t between ('aaa', 1.0) and ('aaa', 100.0, 11) regions 10; + +## Value type not match. +-- error 1265 +split table t between ('aaa', 0.0) and (100.0, 'aaa') regions 10; + +## lower bound >= upper bound. +-- error 8212 +split table t between ('aaa', 0.0) and ('aaa', 0.0) regions 10; +-- error 8212 +split table t between ('bbb', 0.0) and ('aaa', 0.0) regions 10; + +## Exceed limit 1000. +-- error 1105 +split table t between ('aaa', 0.0) and ('aaa', 0.1) regions 100000; + +## Split on null values. +-- error 1048 +split table t between (null, null) and (null, null) regions 1000; +-- error 1048 +split table t by (null, null); + +## Success. 
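+## (These bounds match the clustered primary key (a, b) in type, are strictly
+## increasing, and stay within the 1000-region limit checked above.)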
+split table t between ('aaa', 0.0) and ('aaa', 100.0) regions 10; +split table t by ('aaa', 0.0), ('aaa', 20.0), ('aaa', 100.0); +split table t by ('aaa', 100.0), ('qqq', 20.0), ('zzz', 100.0), ('zzz', 1000.0); +drop table t; +create table t (a int, b int, c int, d int, primary key(a, c, d)); +split table t between (0, 0, 0) and (0, 0, 1) regions 1000; +drop table t; +create table t (a int, b int, c int, d int, primary key(d, a, c)); +split table t by (0, 0, 0), (1, 2, 3), (65535, 65535, 65535); +drop table if exists t; +create table t (a varchar(255), b decimal, c int, primary key (a, b)); +-- error 1265 +split table t by ('aaa', ''); +drop table t; +CREATE TABLE t (`id` varchar(10) NOT NULL, primary key (`id`) CLUSTERED); +-- error 1176 +split table t index `primary` between (0) and (1000) regions 2; +set tidb_enable_clustered_index=default; + diff --git a/tests/integrationtest/t/executor/stale_txn.test b/tests/integrationtest/t/executor/stale_txn.test new file mode 100644 index 0000000000000..0d11911ead762 --- /dev/null +++ b/tests/integrationtest/t/executor/stale_txn.test @@ -0,0 +1,34 @@ +# TestIssue35686 +## This query should not panic +--disable_result_log +select * from information_schema.ddl_jobs as of timestamp now(); +--enable_result_log + +# TestIssue31954 +drop table if exists t1; +create table t1 (id int primary key, v int); +insert into t1 values(1, 10); +select sleep(0.1); +set @a=now(6); +select sleep(0.1); +update t1 set v=100 where id=1; +select * from t1 as of timestamp @a where v=(select v from t1 as of timestamp @a where id=1); +select (select v from t1 as of timestamp @a where id=1) as v; + +# TestIssue30872 +set tidb_txn_mode='pessimistic'; +set tx_isolation = 'READ-COMMITTED'; +drop table if exists t1; +create table t1 (id int primary key, v int); +insert into t1 values(1, 10); +select sleep(0.1); +set @a=now(6); +select sleep(0.1); +update t1 set v=100 where id=1; +set autocommit=0; +select * from t1 as of timestamp @a; + +set tidb_txn_mode = default; +set tx_isolation = default; +set autocommit = default; + diff --git a/tests/integrationtest/t/executor/statement_context.test b/tests/integrationtest/t/executor/statement_context.test new file mode 100644 index 0000000000000..1b26554549074 --- /dev/null +++ b/tests/integrationtest/t/executor/statement_context.test @@ -0,0 +1,91 @@ +# TestStatementContext +drop table if exists sc, sc2, sc3; +create table sc (a int); +insert sc values (1), (2); +set sql_mode = 'STRICT_TRANS_TABLES'; +select * from sc where a > cast(1.1 as decimal); +update sc set a = 4 where a > cast(1.1 as decimal); +set sql_mode = ''; +update sc set a = 3 where a > cast(1.1 as decimal); +select * from sc; +set sql_mode = 'STRICT_TRANS_TABLES'; +delete from sc; +insert sc values ('1.8'+1); +select * from sc; + +## Handle coprocessor flags, '1x' is an invalid int. +## UPDATE and DELETE do select request first which is handled by coprocessor. +## In strict mode we expect error. +# TODO: https://github.com/pingcap/tidb/issues/48120 +# -- error 1292 +# update sc set a = 4 where a > '1x'; +# -- error 1292 +# delete from sc where a < '1x'; +select * from sc where a > '1x'; + +## Non-strict mode never returns error. 
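+## (For example, comparing column a with '1x' casts the string to the number 1
+## and, once strict mode is off, only records a truncation warning.)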
+set sql_mode = ''; +update sc set a = 4 where a > '1x'; +delete from sc where a < '1x'; +select * from sc where a > '1x'; + +## Test invalid UTF8 +create table sc2 (a varchar(255)); +## Insert an invalid UTF8 +insert sc2 values (unhex('4040ffff')); +select @@warning_count > 0; +select * from sc2; +set sql_mode = 'STRICT_TRANS_TABLES'; +-- error 1366 +insert sc2 values (unhex('4040ffff')); +set @@tidb_skip_utf8_check = '1'; +insert sc2 values (unhex('4040ffff')); +select length(a) from sc2; +set @@tidb_skip_utf8_check = '0'; +insert sc2 values ('�'); + +## Test invalid ASCII +create table sc3 (a varchar(255)) charset ascii; +set sql_mode = ''; +insert sc3 values (unhex('4040ffff')); +select @@warning_count > 0; +select * from sc3; +set sql_mode = 'STRICT_TRANS_TABLES'; +-- error 1366 +insert sc3 values (unhex('4040ffff')); +set @@tidb_skip_ascii_check = '1'; +insert sc3 values (unhex('4040ffff')); +select length(a) from sc3; + +## no placeholder in ASCII, so just insert '@@'... +set @@tidb_skip_ascii_check = '0'; +insert sc3 values (unhex('4040')); + +## Test non-BMP characters. +set sql_mode = ''; +drop table if exists t1; +create table t1(a varchar(100) charset utf8); +insert t1 values (unhex('f09f8c80')); +select @@warning_count > 0; +select * from t1; +insert t1 values (unhex('4040f09f8c80')); +select @@warning_count > 0; +select * from t1; +select length(a) from t1; +set sql_mode = 'STRICT_TRANS_TABLES'; +-- error 1366 +insert t1 values (unhex('f09f8c80')); +-- error 1366 +insert t1 values (unhex('F0A48BAE')); +set global tidb_check_mb4_value_in_utf8 = false; +insert t1 values (unhex('f09f8c80')); +set global tidb_check_mb4_value_in_utf8 = true; +-- error 1366 +insert t1 values (unhex('F0A48BAE')); +drop table if exists t1; + +set global tidb_check_mb4_value_in_utf8 = default; +set sql_mode = default; +set @@tidb_skip_ascii_check = default; +set @@tidb_skip_utf8_check = default; + diff --git a/tests/integrationtest/t/executor/update.test b/tests/integrationtest/t/executor/update.test new file mode 100644 index 0000000000000..2eed96a2cd228 --- /dev/null +++ b/tests/integrationtest/t/executor/update.test @@ -0,0 +1,268 @@ +# TestUpdateGenColInTxn +drop table if exists t; +create table t(a bigint, b bigint as (a+1)); +begin; +insert into t(a) values(1); +-- error 3105 +update t set b=6 where b=2; +commit; +select * from t; + +# TestUpdateWithAutoidSchema +drop table if exists t1, t2, t3; +create table t1(id int primary key auto_increment, n int); +create table t2(id int primary key, n float auto_increment, key I_n(n)); +create table t3(id int primary key, n double auto_increment, key I_n(n)); +insert into t1 set n = 1; +select * from t1 where id = 1; +update t1 set id = id+1; +select * from t1 where id = 2; +insert into t1 set n = 2; +select * from t1 where id = 3; +update t1 set id = id + '1.1' where id = 3; +select * from t1 where id = 4; +insert into t1 set n = 3; +select * from t1 where id = 5; +update t1 set id = id + '0.5' where id = 5; +select * from t1 where id = 6; +insert into t1 set n = 4; +select * from t1 where id = 7; +insert into t2 set id = 1; +select * from t2 where id = 1; +update t2 set n = n+1; +select * from t2 where id = 1; +insert into t2 set id = 2; +select * from t2 where id = 2; +update t2 set n = n + '2.2'; +select * from t2 where id = 2; +insert into t2 set id = 3; +select * from t2 where id = 3; +update t2 set n = n + '0.5' where id = 3; +select * from t2 where id = 3; +insert into t2 set id = 4; +select * from t2 where id = 4; +insert into t3 set id = 1; 
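+## (n is an AUTO_INCREMENT double column, so the INSERT above, which omits n,
+## should have assigned it the first sequence value.)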
+select * from t3 where id = 1; +update t3 set n = n+1; +select * from t3 where id = 1; +insert into t3 set id = 2; +select * from t3 where id = 2; +update t3 set n = n + '3.3'; +select * from t3 where id = 2; +insert into t3 set id = 3; +select * from t3 where id = 3; +update t3 set n = n + '0.5' where id = 3; +select * from t3 where id = 3; +insert into t3 set id = 4; +select * from t3 where id = 4; + +# TestUpdateMultiDatabaseTable +drop table if exists t; +drop database if exists test2; +create database test2; +create table t(a int, b int generated always as (a+1) virtual); +create table test2.t(a int, b int generated always as (a+1) virtual); +update t, test2.t set executor__update.t.a=1; +drop database test2; + +# TestUpdateSwapColumnValues +drop table if exists t1, t2; +create table t1 (c_str varchar(40)); +create table t2 (c_str varchar(40)); +insert into t1 values ('Alice'); +insert into t2 values ('Bob'); +select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str; +update t1, t2 set t1.c_str = t2.c_str, t2.c_str = t1.c_str where t1.c_str <= t2.c_str; +select t1.c_str, t2.c_str from t1, t2 where t1.c_str <= t2.c_str; +drop table if exists t; +create table t (a int, b int); +insert into t values(1, 2); +select * from t; +update t set a=b, b=a; +select * from t; +drop table if exists t; +create table t (a int, b int); +insert into t values (1,3); +select * from t; +update t set a=b, b=a; +select * from t; +drop table if exists t; +create table t (a int, b int, c int as (-a) virtual, d int as (-b) stored); +insert into t(a, b) values (10, 11), (20, 22); +select * from t; +update t set a=b, b=a; +select * from t; +update t set b=30, a=b; +select * from t; + +# TestMultiUpdateOnSameTable +drop table if exists t; +create table t(x int, y int); +insert into t values(); +update t t1, t t2 set t2.y=1, t1.x=2; +select * from t; +update t t1, t t2 set t1.x=t2.y, t2.y=t1.x; +select * from t; + +## Update generated columns +drop table if exists t; +create table t(x int, y int, z int as (x+10) stored, w int as (y-10) virtual); +insert into t(x, y) values(1, 2), (3, 4); +update t t1, t t2 set t2.y=1, t1.x=2 where t1.x=1; +select * from t; +update t t1, t t2 set t1.x=5, t2.y=t1.x where t1.x=3; +select * from t; +drop table if exists t; +create table t(a int, b int, c int as (a+b) stored); +insert into t(a, b) values (1, 2); +update t t1, t t2 set t2.a=3; +select * from t; +update t t1, t t2 set t1.a=4, t2.b=5; +select * from t; + +## Update primary keys +drop table if exists t; +create table t (a int primary key); +insert into t values (1), (2); +update t set a=a+2; +select * from t; +update t m, t n set m.a = n.a+10 where m.a=n.a; +select * from t; +drop table if exists t; +create table t (a int primary key, b int); +insert into t values (1,3), (2,4); +-- error 1706 +update t m, t n set m.a = n.a+10, n.b = m.b+1 where m.a=n.a; +drop table if exists t; +create table t (a int, b int, c int, primary key(a, b)); +insert into t values (1,3,5), (2,4,6); +update t m, t n set m.a = n.a+10, m.b = n.b+10 where m.a=n.a; +select * from t; +update t m, t n, t q set q.c=m.a+n.b, n.c = m.a+1, m.c = n.b+1 where m.b=n.b AND m.a=q.a; +select * from t; +-- error 1706 +update t m, t n, t q set m.a = m.a+1, n.c = n.c-1, q.c = q.a+q.b where m.b=n.b and n.b=q.b; + +# TestUpdateClusterIndex +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(id varchar(200) primary key, v int); +insert into t(id, v) values ('abc', 233); +select id, v from t where id = 'abc'; +update t set id = 'dfg' 
where id = 'abc'; +select * from t; +update t set id = 'aaa', v = 333 where id = 'dfg'; +select * from t where id = 'aaa'; +update t set v = 222 where id = 'aaa'; +select * from t where id = 'aaa'; +insert into t(id, v) values ('bbb', 111); +-- error 1062 +update t set id = 'bbb' where id = 'aaa'; +drop table if exists ut3pk; +create table ut3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into ut3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111); +select id1, id2, id3, v from ut3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +update ut3pk set id1 = 'abc', id2 = 'bbb2', id3 = 222, v = 555 where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +select id1, id2, id3, v from ut3pk where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +select id1, id2, id3, v from ut3pk; +update ut3pk set v = 666 where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +select id1, id2, id3, v from ut3pk; +insert into ut3pk(id1, id2, id3, v) values ('abc', 'bbb3', 222, 777); +-- error 1062 +update ut3pk set id2 = 'bbb3' where id1 = 'abc' and id2 = 'bbb2' and id3 = 222; +drop table if exists ut1pku; +create table ut1pku(id varchar(200) primary key, uk int, v int, unique key ukk(uk)); +insert into ut1pku(id, uk, v) values('a', 1, 2), ('b', 2, 3); +select * from ut1pku; +update ut1pku set uk = 3 where id = 'a'; +select * from ut1pku; +-- error 1062 +update ut1pku set uk = 2 where id = 'a'; +select * from ut1pku; +drop table if exists t; +create table t(a char(10) primary key, b char(10)); +insert into t values('a', 'b'); +update t set a='c' where t.a='a' and b='b'; +select * from t; +drop table if exists s; +create table s (a int, b int, c int, primary key (a, b)); +insert s values (3, 3, 3), (5, 5, 5); +update s set c = 10 where a = 3; +select * from s; +set tidb_enable_clustered_index = default; + +# TestDeleteClusterIndex +set tidb_enable_clustered_index = on; +drop table if exists t; +create table t(id varchar(200) primary key, v int); +insert into t(id, v) values ('abc', 233); +delete from t where id = 'abc'; +select * from t; +select * from t where id = 'abc'; +drop table if exists it3pk; +create table it3pk(id1 varchar(200), id2 varchar(200), v int, id3 int, primary key(id1, id2, id3)); +insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 233, 111); +delete from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +select * from it3pk; +select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +insert into it3pk(id1, id2, v, id3) values ('aaa', 'bbb', 433, 111); +select * from it3pk where id1 = 'aaa' and id2 = 'bbb' and id3 = 111; +drop table if exists dt3pku; +create table dt3pku(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +insert into dt3pku(id, uk, v) values('a', 1, 2); +delete from dt3pku where id = 'a'; +select * from dt3pku; +insert into dt3pku(id, uk, v) values('a', 1, 2); +drop table if exists s1; +create table s1 (a int, b int, c int, primary key (a, b)); +insert s1 values (3, 3, 3), (5, 5, 5); +delete from s1 where a = 3; +select * from s1; +set tidb_enable_clustered_index = default; + +# TestReplaceClusterIndex +set tidb_enable_clustered_index = on; +drop table if exists rt1pk; +create table rt1pk(id varchar(200) primary key, v int); +replace into rt1pk(id, v) values('abc', 1); +select * from rt1pk; +replace into rt1pk(id, v) values('bbb', 233), ('abc', 2); +select * from rt1pk; +drop table if exists rt3pk; +create table rt3pk(id1 timestamp, id2 time, v int, id3 year, primary key(id1, id2, id3)); +replace into rt3pk(id1, 
id2,id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 1); +select * from rt3pk; +replace into rt3pk(id1, id2, id3, v) values('2018-01-01 11:11:11', '22:22:22', '2019', 2); +select * from rt3pk; +drop table if exists rt1pk1u; +create table rt1pk1u(id varchar(200) primary key, uk int, v int, unique key uuk(uk)); +replace into rt1pk1u(id, uk, v) values("abc", 2, 1); +select * from rt1pk1u; +replace into rt1pk1u(id, uk, v) values("aaa", 2, 11); +select * from rt1pk1u; +set tidb_enable_clustered_index = default; + +# TestOutOfRangeWithUnsigned +drop table if exists t; +create table t(ts int(10) unsigned NULL DEFAULT NULL); +insert into t values(1); +-- error 1690 +update t set ts = IF(ts < (0 - ts), 1,1) where ts>0; + +# TestIssue23553 +drop table if exists tt; +create table tt (m0 varchar(64), status tinyint not null); +insert into tt values('1',0),('1',0),('1',0); +update tt a inner join (select m0 from tt where status!=1 group by m0 having count(*)>1) b on a.m0=b.m0 set a.status=1; + +# TestUpdateUnsignedWithOverflow +# see issue https://github.com/pingcap/tidb/issues/47816 +drop table if exists t1; +create table t1(id int, a int unsigned); +set sql_mode=''; +insert into t1 values(1, 10), (2, 20); +update t1 set a='-1' where id=1; +update t1 set a='1000000000000000000' where id=2; +select id, a from t1 order by id asc; +set sql_mode=default; + diff --git a/tests/integrationtest/t/executor/window.test b/tests/integrationtest/t/executor/window.test new file mode 100644 index 0000000000000..1d55cf76ed36f --- /dev/null +++ b/tests/integrationtest/t/executor/window.test @@ -0,0 +1,25 @@ +# TestIssue24264 +drop table if exists tbl_2; +create table tbl_2 ( col_10 char(65) collate utf8mb4_unicode_ci not null , col_11 bigint not null , col_12 datetime not null , col_13 bigint unsigned default 327695751717730004 , col_14 timestamp default '2010-11-18' not null , primary key idx_5 ( col_11,col_13 ) /*T![clustered_index] clustered */ , unique key idx_6 ( col_10,col_11,col_13 ) , unique key idx_7 ( col_14,col_12,col_13 ) ); +insert into tbl_2 values ( 'RmF',-5353757041350034197,'1996-01-22',1866803697729291364,'1996-09-11' ); +insert into tbl_2 values ( 'xEOGaB',-6602924241498980347,'2019-02-22',8297270320597030697,'1972-04-04' ); +insert into tbl_2 values ( 'dvUztqgTPAhLdzgEsV',3316448219481769821,'2034-09-12',937089564901142512,'2030-12-04' ); +insert into tbl_2 values ( 'mNoyfbT',-6027094365061219400,'2035-10-10',1752804734961508175,'1992-08-09' ); +insert into tbl_2 values ( 'BDPJMhLYXuKB',6823702503458376955,'2015-04-09',737914379167848827,'2026-04-29' ); +insert into tbl_2 values ( 'WPiaVfPstGohvHd',1308183537252932688,'2020-05-03',5364104746649397703,'1979-01-28' ); +insert into tbl_2 values ( 'lrm',4642935044097656317,'1973-04-29',149081313305673035,'2013-02-03' ); +insert into tbl_2 values ( '',-7361040853169906422,'2024-10-22',6308270832310351889,'1981-02-01' ); +insert into tbl_2 values ( 'uDANahGcLwpSssabD',2235074865448210231,'1992-10-10',7140606140672586593,'1992-11-25' ); +insert into tbl_2 values ( 'TDH',-1911014243756021618,'2013-01-26',2022218243939205750,'1982-04-04' ); +select lead(col_13,1,NULL) over w from tbl_2 window w as (order by col_13); + +# TestIssue29947 +drop table if exists t_tir89b, t_vejdy; +CREATE TABLE `t_tir89b` (`c_3pcik` int(11) DEFAULT NULL,`c_0b6nxb` text DEFAULT NULL,`c_qytrlc` double NOT NULL,`c_sroc_c` int(11) DEFAULT NULL,PRIMARY KEY (`c_qytrlc`) /*T![clustered_index] NONCLUSTERED */ ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +INSERT 
INTO t_tir89b VALUES (66,'cjd1o',87.77,NULL),(134217728,'d_unpd',76.66,NULL),(50,'_13gs',1.46,32),(49,'xclvsc',64.7,48),(7,'1an13',70.86,7),(29,NULL,6.26,6),(8,'hc485b',47.44,2),(84,'d_nlmd',99.3,76),(14,'lbny1c',61.1,47),(45,'9r5bid',25.37,95),(49,'jbz5r',72.99,49),(18,'uode3d',7.21,992),(-8945040,'ftrtib',47.47,20),(29,'algrj',6.28,24),(96,NULL,67.83,24),(5,'s1gfz',89.18,78),(74,'ggqbl',83.89,68),(61,'5n1q7',26.92,6),(10,'4gflb',33.84,28),(48,'xoe0cd',84.71,77),(6,'xkh6i',53.83,19),(5,NULL,89.1,46),(49,'4q6nx',31.5,384),(1,'pgs1',66.8,77),(19,'lltflc',33.49,63),(87,'vd4htc',39.92,-5367008),(47,NULL,28.3,10),(29,'15jqfc',100.11,64),(45,'ii6pm',52.41,61),(0,NULL,85.27,19),(104,'ikpxnb',40.66,955),(40,'gzryzd',36.23,42),(18,'7UPNE',84.27,14),(32,NULL,84.8,53),(51,'2c5lfb',18.98,74),(97,NULL,22.89,6),(70,'guyzyc',96.29,89),(34,'dvdoqb',53.82,1),(94,'6eop6b',81.77,90),(42,'p7vsnd',62.54,NULL); +CREATE TABLE `t_vejdy` (`c_iovir` int(11) NOT NULL,`c_r_mw3d` double DEFAULT NULL,`c_uxhghb` int(11) DEFAULT NULL,`c_rb7otb` int(11) NOT NULL,`c_dplyac` int(11) DEFAULT NULL,`c_lmcqed` double DEFAULT NULL,`c_ayaoed` text DEFAULT NULL,`c__zbqr` int(11) DEFAULT NULL,PRIMARY KEY (`c_iovir`,`c_rb7otb`) /*T![clustered_index] NONCLUSTERED */,KEY `t_e1ejcd` (`c_uxhghb`),KEY `t_o6ui_b` (`c_iovir`,`c_r_mw3d`,`c_uxhghb`,`c_rb7otb`,`c_dplyac`,`c_lmcqed`)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin; +INSERT INTO t_vejdy VALUES (49,100.11,68,57,44,17.93,NULL,84),(38,56.91,78,30,0,53.28,'cjd1o',2),(6,NULL,NULL,88,81,93.47,'0jftkb',54),(73,91.51,31,82,3,38.12,'buesob',40),(7,26.73,7,78,9,NULL,'fd5kgd',49),(80,70.57,4,47,43,25.59,'glpoq',44),(79,94.16,15,0,0,79.55,'0ok94d',56),(58,NULL,50,69,2,65.46,'sm6rj',29),(41472,6.51,70,1080,100,43.18,'fofk4c',43),(0,6.2,57,97,2,56.17,'zqpzq',56),(72,76.66,97,88,95,75.47,'hikxqb',34),(27,1.11,134217728,57,25,NULL,'4gflb',0),(64,NULL,47,69,6,72.5,'w7jmhd',45),(-134217679,88.74,33,82,85,59.89,NULL,26),(59,97.98,37,28,33,61.1,'xioxdd',45),(6,47.31,0,0,-19,38.77,'uxmdlc',17),(82,28.62,36,70,39,11.79,'zzi8cc',2),(33,37.3,55,86,69,60.56,'mn_xx',0),(7,NULL,80,0,17,59.79,'5n1q7',97),(88,50.81,15,30,63,25.37,'ordwed',29),(48,4.32,90,48,38,84.62,'lclx',32),(10,NULL,95,75,1,21.64,NULL,85),(62,NULL,0,30,10,NULL,'7bacud',5),(50,38.81,6,0,6,64.28,'gpibn',57),(1,46.8,21,32,46,33.38,NULL,6),(29,NULL,38,7,91,31.5,'pdzdl',24),(54,6.26,1,85,22,75.63,'gl4_7',29),(1,90.37,63,63,6,61.2,'wvw23b',86),(47,NULL,82,73,0,95.79,'uipcf',NULL),(46,48.1,37,6,1,52.33,'gthpic',0),(41,75.1,7,44,5,84.16,'fe_e5',58),(43,87.71,81,32,28,91.98,'9e5nvc',66),(20,58.21,88,75,92,43.64,'kagroc',66),(91,52.75,22,14,80,NULL,'\'_YN6MD\'',6),(72,94.83,0,49,5,57.82,NULL,23),(7,100.11,0,92,13,6.28,NULL,0); +begin; +delete from t_tir89b where t_tir89b.c_3pcik >= t_tir89b.c_sroc_c; +select * from (select count(*) over (partition by ref_0.c_0b6nxb order by ref_0.c_3pcik) as c0 from t_tir89b as ref_0) as subq_0 where subq_0.c0 <> 1; +commit; diff --git a/tests/integrationtest/t/expression/charset_and_collation.test b/tests/integrationtest/t/expression/charset_and_collation.test index 61d01c1dc275d..ef5ecffab1b88 100644 --- a/tests/integrationtest/t/expression/charset_and_collation.test +++ b/tests/integrationtest/t/expression/charset_and_collation.test @@ -356,8 +356,7 @@ select * from t use index(ud); select * from t use index(e); select * from t use index(ue); admin check table t; -# TODO: fix https://github.com/pingcap/tidb/issues/47687 -# admin recover index t a; +admin recover index t a; alter table t add column 
n char(10) COLLATE utf8mb4_unicode_ci; alter table t add index n(n); update t set n = '吧'; diff --git a/tests/integrationtest/t/expression/json.test b/tests/integrationtest/t/expression/json.test index ca41e0b701fea..142b46288ce18 100644 --- a/tests/integrationtest/t/expression/json.test +++ b/tests/integrationtest/t/expression/json.test @@ -359,3 +359,13 @@ select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[1 to 100]'); select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to last]'); select json_extract('[{"a": [1,2,3,4]}]', '$[0].a[0 to 2]'); +# TestCastJSONStringToInteger +drop table if exists t; +create table t (a json); +insert into t values ('"-1"'); +insert into t values ('"18446744073709551615"'); +insert into t values ('"18446744073709552000"'); +-- sorted_result +select a, cast(a as unsigned) from t; +-- sorted_result +select a, cast(a as signed) from t; diff --git a/tests/integrationtest/t/infoschema/infoschema.test b/tests/integrationtest/t/infoschema/infoschema.test index f03255a6250f2..a6ef515653436 100644 --- a/tests/integrationtest/t/infoschema/infoschema.test +++ b/tests/integrationtest/t/infoschema/infoschema.test @@ -10,6 +10,7 @@ select count(length(query)) from information_schema.ddl_jobs; # NO ERROR # TestInfoSchemaRenameTable drop table if EXISTS t1; +drop table if EXISTS mysql.t1, mysql.t2, mysql.t3; create table infoschema__infoschema.t1 (id int primary key, a text); insert infoschema__infoschema.t1 values(1,'334'),(4,'3443435'),(5,'fdf43t536653'); rename table infoschema__infoschema.t1 to mysql.t1; diff --git a/tests/integrationtest/t/planner/core/issuetest/planner_issue.test b/tests/integrationtest/t/planner/core/issuetest/planner_issue.test index 74cce48bd96a9..1b58c7c5046c7 100644 --- a/tests/integrationtest/t/planner/core/issuetest/planner_issue.test +++ b/tests/integrationtest/t/planner/core/issuetest/planner_issue.test @@ -79,3 +79,60 @@ insert into tbl_39 values (1994),(1995),(1996),(1997); explain select /*+ use_index_merge( tbl_39) */ col_239 from tbl_39 where not( tbl_39.col_239 not in ( '1994' ) ) and tbl_39.col_239 not in ( '2004' , '2010' , '2010' ) or not( tbl_39.col_239 <= '1996' ) and not( tbl_39.col_239 between '2026' and '2011' ) order by tbl_39.col_239 limit 382; select /*+ use_index_merge( tbl_39) */ col_239 from tbl_39 where not( tbl_39.col_239 not in ( '1994' ) ) and tbl_39.col_239 not in ( '2004' , '2010' , '2010' ) or not( tbl_39.col_239 <= '1996' ) and not( tbl_39.col_239 between '2026' and '2011' ) order by tbl_39.col_239 limit 382; +# https://github.com/pingcap/tidb/issues/47881 +drop table if exists t, t1, t2; +create table t (id int,name varchar(10)); +insert into t values(1,'tt'); +create table t1(id int,name varchar(10),name1 varchar(10),name2 varchar(10)); +insert into t1 values(1,'tt','ttt','tttt'),(2,'dd','ddd','dddd'); +create table t2(id int,name varchar(10),name1 varchar(10),name2 varchar(10),`date1` date); +insert into t2 values(1,'tt','ttt','tttt','2099-12-31'),(2,'dd','ddd','dddd','2099-12-31'); +WITH bzzs AS ( + SELECT + count(1) AS bzn + FROM + t c +), +tmp1 AS ( + SELECT + t1.* + FROM + t1 + LEFT JOIN bzzs ON 1 = 1 + WHERE + name IN ('tt') + AND bzn <> 1 +), +tmp2 AS ( + SELECT + tmp1.*, + date('2099-12-31') AS endate + FROM + tmp1 +), +tmp3 AS ( + SELECT + * + FROM + tmp2 + WHERE + endate > CURRENT_DATE + UNION ALL + SELECT + '1' AS id, + 'ss' AS name, + 'sss' AS name1, + 'ssss' AS name2, + date('2099-12-31') AS endate + FROM + bzzs t1 + WHERE + bzn = 1 +) +SELECT + c2.id, + c3.id +FROM + t2 db + LEFT JOIN tmp3 c2 ON c2.id 
= '1' + LEFT JOIN tmp3 c3 ON c3.id = '1'; diff --git a/tests/integrationtest/t/planner/core/tests/prepare/issue.test b/tests/integrationtest/t/planner/core/tests/prepare/issue.test index e15e95ca30385..0056cfb8b2144 100644 --- a/tests/integrationtest/t/planner/core/tests/prepare/issue.test +++ b/tests/integrationtest/t/planner/core/tests/prepare/issue.test @@ -199,6 +199,8 @@ execute stmt using @a, @b, @c, @d, @e; select @@last_plan_from_cache; execute stmt using @f, @b, @c, @d, @e; select @@last_plan_from_cache; +set tidb_enable_prepared_plan_cache=DEFAULT; +set @@tidb_enable_collect_execution_info=DEFAULT; # TestIssue29805 set tidb_enable_prepared_plan_cache=1; diff --git a/tests/integrationtest/t/privilege/privileges.test b/tests/integrationtest/t/privilege/privileges.test index 719253443426e..9dcb9409c319f 100644 --- a/tests/integrationtest/t/privilege/privileges.test +++ b/tests/integrationtest/t/privilege/privileges.test @@ -25,7 +25,7 @@ CREATE USER 'testnotexist'@'localhost'; CREATE DATABASE IF NOT EXISTS privilege__privileges; CREATE TABLE privilege__privileges.t1 (a int); connect (testnotexist,localhost,testnotexist,,); -connection testnotexist; +connection testnotexist; --error 1142 SELECT * FROM privilege__privileges.t1; --error 1142 @@ -47,6 +47,7 @@ connection default; drop table t1; # TestGrantRoutine +drop user if exists u1; CREATE USER u1; CREATE TABLE routine_table (a int); GRANT CREATE ROUTINE on privilege__privileges.* to u1; @@ -229,6 +230,7 @@ connection default; # TestRevokePrivileges +drop user if exists u4, 'hasgrant', 'withoutgrant'; CREATE USER 'hasgrant'; CREATE USER 'withoutgrant'; GRANT ALL ON *.* TO 'hasgrant'; @@ -367,6 +369,8 @@ connection default; # TestCreateDropUser +drop user if exists tcd1, tcd2, tcd3, usr1; +drop resource group if exists rg1; CREATE USER tcd1, tcd2; GRANT ALL ON *.* to tcd2 WITH GRANT OPTION; @@ -805,4 +809,6 @@ SHOW GRANTS FOR CURRENT_USER() USING notgranted; SHOW GRANTS FOR current_user() USING otherrole; SHOW GRANTS FOR joe USING otherrole; disconnect joe; -connection default; \ No newline at end of file +connection default; + +set global tidb_enable_resource_control = default; \ No newline at end of file diff --git a/tests/realtikvtest/addindextest/BUILD.bazel b/tests/realtikvtest/addindextest/BUILD.bazel index 53910ff9b5d98..c00366a0974c8 100644 --- a/tests/realtikvtest/addindextest/BUILD.bazel +++ b/tests/realtikvtest/addindextest/BUILD.bazel @@ -26,44 +26,15 @@ go_test( "add_index_test.go", "concurrent_ddl_test.go", "failpoints_test.go", - "global_sort_test.go", - "integration_test.go", "main_test.go", "multi_schema_change_test.go", - "operator_test.go", "pitr_test.go", ], embed = [":addindextest"], deps = [ - "//br/pkg/lightning/backend/external", - "//br/pkg/lightning/backend/local", - "//br/pkg/storage", "//pkg/config", - "//pkg/ddl", - "//pkg/ddl/copr", - "//pkg/ddl/ingest", - "//pkg/ddl/testutil", - "//pkg/ddl/util/callback", - "//pkg/disttask/framework/dispatcher", - "//pkg/disttask/framework/proto", - "//pkg/disttask/operator", - "//pkg/domain", - "//pkg/errno", - "//pkg/kv", - "//pkg/parser/model", - "//pkg/sessionctx", - "//pkg/sessionctx/variable", - "//pkg/table", - "//pkg/table/tables", "//pkg/testkit", - "//pkg/util/chunk", "//tests/realtikvtest", - "@com_github_fsouza_fake_gcs_server//fakestorage", - "@com_github_ngaut_pools//:pools", - "@com_github_phayes_freeport//:freeport", - "@com_github_pingcap_failpoint//:failpoint", - "@com_github_stretchr_testify//assert", "@com_github_stretchr_testify//require", - 
"@org_golang_x_sync//errgroup", ], ) diff --git a/tests/realtikvtest/addindextest/add_index_test.go b/tests/realtikvtest/addindextest/add_index_test.go index 409e3f1ba99df..3e71d83596082 100644 --- a/tests/realtikvtest/addindextest/add_index_test.go +++ b/tests/realtikvtest/addindextest/add_index_test.go @@ -17,16 +17,9 @@ package addindextest import ( "testing" - "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" - "github.com/pingcap/tidb/pkg/ddl" - "github.com/pingcap/tidb/pkg/ddl/util/callback" - "github.com/pingcap/tidb/pkg/disttask/framework/dispatcher" - "github.com/pingcap/tidb/pkg/disttask/framework/proto" - "github.com/pingcap/tidb/pkg/parser/model" "github.com/pingcap/tidb/pkg/testkit" "github.com/pingcap/tidb/tests/realtikvtest" - "github.com/stretchr/testify/require" ) func init() { @@ -133,150 +126,3 @@ func TestAddForeignKeyWithAutoCreateIndex(t *testing.T) { tk.MustExec("update employee set pid=id-1 where id>1 and pid is null") tk.MustExec("alter table employee add foreign key fk_1(pid) references employee(id)") } - -func TestAddIndexDistBasic(t *testing.T) { - store := realtikvtest.CreateMockStoreAndSetup(t) - if store.Name() != "TiKV" { - t.Skip("TiKV store only") - } - - tk := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test;") - tk.MustExec("create database test;") - tk.MustExec("use test;") - tk.MustExec(`set global tidb_enable_dist_task=1;`) - - tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 20;") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") - tk.MustExec("alter table t add index idx(a);") - tk.MustExec("admin check index t idx;") - - tk.MustExec("create table t1(a bigint auto_random primary key);") - tk.MustExec("insert into t1 values (), (), (), (), (), ()") - tk.MustExec("insert into t1 values (), (), (), (), (), ()") - tk.MustExec("insert into t1 values (), (), (), (), (), ()") - tk.MustExec("insert into t1 values (), (), (), (), (), ()") - tk.MustExec("split table t1 between (3) and (8646911284551352360) regions 50;") - tk.MustExec("alter table t1 add index idx(a);") - tk.MustExec("admin check index t1 idx;") - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/scheduler/MockRunSubtaskContextCanceled", "1*return(true)")) - tk.MustExec("alter table t1 add index idx1(a);") - tk.MustExec("admin check index t1 idx1;") - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/scheduler/MockRunSubtaskContextCanceled")) - tk.MustExec(`set global tidb_enable_dist_task=0;`) -} - -func TestAddIndexDistCancel(t *testing.T) { - store := realtikvtest.CreateMockStoreAndSetup(t) - if store.Name() != "TiKV" { - t.Skip("TiKV store only") - } - - tk := testkit.NewTestKit(t, store) - tk1 := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test;") - tk.MustExec("create database test;") - tk.MustExec("use test;") - tk.MustExec(`set global tidb_enable_dist_task=1;`) - - tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 8;") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), 
()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") - - ddl.MockDMLExecutionAddIndexSubTaskFinish = func() { - row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() - require.Equal(t, 1, len(row)) - jobID := row[0][0].(string) - tk1.MustExec("admin cancel ddl jobs " + jobID) - } - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish", "1*return(true)")) - defer func() { - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish")) - }() - - require.Error(t, tk.ExecToErr("alter table t add index idx(a);")) - tk.MustExec("admin check table t;") - tk.MustExec("alter table t add index idx2(a);") - tk.MustExec("admin check table t;") - - tk.MustExec(`set global tidb_enable_dist_task=0;`) -} - -func TestAddIndexDistPauseAndResume(t *testing.T) { - store, dom := realtikvtest.CreateMockStoreAndDomainAndSetup(t) - if store.Name() != "TiKV" { - t.Skip("TiKV store only") - } - - tk := testkit.NewTestKit(t, store) - tk1 := testkit.NewTestKit(t, store) - tk.MustExec("drop database if exists test;") - tk.MustExec("create database test;") - tk.MustExec("use test;") - - tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 8;") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("insert into t values (), (), (), (), (), ()") - tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") - - ddl.MockDMLExecutionAddIndexSubTaskFinish = func() { - row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() - require.Equal(t, 1, len(row)) - jobID := row[0][0].(string) - tk1.MustExec("admin pause ddl jobs " + jobID) - } - - dispatcher.MockDMLExecutionOnPausedState = func(task *proto.Task) { - row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() - require.Equal(t, 1, len(row)) - jobID := row[0][0].(string) - tk1.MustExec("admin resume ddl jobs " + jobID) - } - - ddl.MockDMLExecutionOnTaskFinished = func() { - row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() - require.Equal(t, 1, len(row)) - jobID := row[0][0].(string) - tk1.MustExec("admin pause ddl jobs " + jobID) - } - - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish", "3*return(true)")) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/mockDMLExecutionOnPausedState", "return(true)")) - tk.MustExec(`set global tidb_enable_dist_task=1;`) - tk.MustExec("alter table t add index idx1(a);") - tk.MustExec("admin check table t;") - tk.MustExec("alter table t add index idx2(a);") - tk.MustExec("admin check table t;") - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish")) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/mockDMLExecutionOnPausedState")) - - // dist task succeed, job paused and resumed. 
- var hook = &callback.TestDDLCallback{Do: dom} - var resumeFunc = func(job *model.Job) { - if job.IsPaused() { - row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() - require.Equal(t, 1, len(row)) - jobID := row[0][0].(string) - tk1.MustExec("admin resume ddl jobs " + jobID) - } - } - hook.OnJobUpdatedExported.Store(&resumeFunc) - dom.DDL().SetHook(hook.Clone()) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/pauseAfterDistTaskSuccess", "1*return(true)")) - tk.MustExec("alter table t add index idx3(a);") - tk.MustExec("admin check table t;") - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/pauseAfterDistTaskSuccess")) - tk.MustExec(`set global tidb_enable_dist_task=0;`) -} diff --git a/tests/realtikvtest/addindextest/multi_schema_change_test.go b/tests/realtikvtest/addindextest/multi_schema_change_test.go index e935a612e89ea..873ca4e173930 100644 --- a/tests/realtikvtest/addindextest/multi_schema_change_test.go +++ b/tests/realtikvtest/addindextest/multi_schema_change_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/tests/realtikvtest/addindextest1/BUILD.bazel b/tests/realtikvtest/addindextest1/BUILD.bazel new file mode 100644 index 0000000000000..2e806d9556b52 --- /dev/null +++ b/tests/realtikvtest/addindextest1/BUILD.bazel @@ -0,0 +1,19 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "addindextest1_test", + timeout = "short", + srcs = [ + "disttask_test.go", + "main_test.go", + ], + flaky = True, + deps = [ + "//pkg/config", + "//pkg/ddl", + "//pkg/testkit", + "//tests/realtikvtest", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + ], +) diff --git a/tests/realtikvtest/addindextest1/disttask_test.go b/tests/realtikvtest/addindextest1/disttask_test.go new file mode 100644 index 0000000000000..fa5c07861ad9d --- /dev/null +++ b/tests/realtikvtest/addindextest1/disttask_test.go @@ -0,0 +1,191 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
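+
+// This file exercises the distributed add-index path (tidb_enable_dist_task):
+// a basic multi-region backfill, panic injection for the table-scan and
+// ingest steps via failpoints, and cancellation of a running add-index job.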
+ +package addindextest + +import ( + "testing" + + "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/pkg/ddl" + "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/tests/realtikvtest" + "github.com/stretchr/testify/require" +) + +func init() { + config.UpdateGlobal(func(conf *config.Config) { + conf.Path = "127.0.0.1:2379" + }) +} + +func TestAddIndexDistBasic(t *testing.T) { + store := realtikvtest.CreateMockStoreAndSetup(t) + if store.Name() != "TiKV" { + t.Skip("TiKV store only") + } + + tk := testkit.NewTestKit(t, store) + tk.MustExec("drop database if exists test;") + tk.MustExec("create database test;") + tk.MustExec("use test;") + tk.MustExec(`set global tidb_enable_dist_task=1;`) + + tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 20;") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") + tk.MustExec("alter table t add index idx(a);") + tk.MustExec("admin check index t idx;") + + tk.MustExec("create table t1(a bigint auto_random primary key);") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("insert into t1 values (), (), (), (), (), ()") + tk.MustExec("split table t1 between (3) and (8646911284551352360) regions 50;") + tk.MustExec("alter table t1 add index idx(a);") + tk.MustExec("admin check index t1 idx;") + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/scheduler/MockRunSubtaskContextCanceled", "1*return(true)")) + tk.MustExec("alter table t1 add index idx1(a);") + tk.MustExec("admin check index t1 idx1;") + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/scheduler/MockRunSubtaskContextCanceled")) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/injectPanicForTableScan", "return()")) + tk.MustExecToErr("alter table t1 add index idx2(a);") + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/injectPanicForTableScan")) + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/injectPanicForIndexIngest", "return()")) + tk.MustExecToErr("alter table t1 add index idx2(a);") + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/injectPanicForIndexIngest")) + + tk.MustExec(`set global tidb_enable_dist_task=0;`) +} + +func TestAddIndexDistCancel(t *testing.T) { + store := realtikvtest.CreateMockStoreAndSetup(t) + if store.Name() != "TiKV" { + t.Skip("TiKV store only") + } + + tk := testkit.NewTestKit(t, store) + tk1 := testkit.NewTestKit(t, store) + tk.MustExec("drop database if exists test;") + tk.MustExec("create database test;") + tk.MustExec("use test;") + tk.MustExec(`set global tidb_enable_dist_task=1;`) + + tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 8;") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + tk.MustExec("insert into t values (), (), (), (), (), ()") + 
tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") + + ddl.MockDMLExecutionAddIndexSubTaskFinish = func() { + row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() + require.Equal(t, 1, len(row)) + jobID := row[0][0].(string) + tk1.MustExec("admin cancel ddl jobs " + jobID) + } + + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish", "1*return(true)")) + defer func() { + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish")) + }() + + require.Error(t, tk.ExecToErr("alter table t add index idx(a);")) + tk.MustExec("admin check table t;") + tk.MustExec("alter table t add index idx2(a);") + tk.MustExec("admin check table t;") + + tk.MustExec(`set global tidb_enable_dist_task=0;`) +} + +// TODO: flaky test which can't find the root cause, will run it later. +// func TestAddIndexDistPauseAndResume(t *testing.T) { +// store, dom := realtikvtest.CreateMockStoreAndDomainAndSetup(t) +// if store.Name() != "TiKV" { +// t.Skip("TiKV store only") +// } + +// tk := testkit.NewTestKit(t, store) +// tk1 := testkit.NewTestKit(t, store) +// tk.MustExec("drop database if exists test;") +// tk.MustExec("create database test;") +// tk.MustExec("use test;") + +// tk.MustExec("create table t(a bigint auto_random primary key) partition by hash(a) partitions 8;") +// tk.MustExec("insert into t values (), (), (), (), (), ()") +// tk.MustExec("insert into t values (), (), (), (), (), ()") +// tk.MustExec("insert into t values (), (), (), (), (), ()") +// tk.MustExec("insert into t values (), (), (), (), (), ()") +// tk.MustExec("split table t between (3) and (8646911284551352360) regions 50;") + +// ddl.MockDMLExecutionAddIndexSubTaskFinish = func() { +// row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() +// require.Equal(t, 1, len(row)) +// jobID := row[0][0].(string) +// tk1.MustExec("admin pause ddl jobs " + jobID) +// <-ddl.TestSyncChan +// } + +// dispatcher.MockDMLExecutionOnPausedState = func(task *proto.Task) { +// row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() +// require.Equal(t, 1, len(row)) +// jobID := row[0][0].(string) +// tk1.MustExec("admin resume ddl jobs " + jobID) +// } + +// ddl.MockDMLExecutionOnTaskFinished = func() { +// row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows() +// require.Equal(t, 1, len(row)) +// jobID := row[0][0].(string) +// tk1.MustExec("admin pause ddl jobs " + jobID) +// } + +// require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish", "3*return(true)")) +// require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/mockDMLExecutionOnPausedState", "return(true)")) +// require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/syncDDLTaskPause", "return()")) +// tk.MustExec(`set global tidb_enable_dist_task=1;`) +// tk.MustExec("alter table t add index idx1(a);") +// tk.MustExec("admin check table t;") +// require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/mockDMLExecutionAddIndexSubTaskFinish")) +// require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/disttask/framework/dispatcher/mockDMLExecutionOnPausedState")) + +// // dist task succeed, job paused and resumed. 
+// var hook = &callback.TestDDLCallback{Do: dom}
+// var resumeFunc = func(job *model.Job) {
+// if job.IsPaused() {
+// row := tk1.MustQuery("select job_id from mysql.tidb_ddl_job").Rows()
+// require.Equal(t, 1, len(row))
+// jobID := row[0][0].(string)
+// tk1.MustExec("admin resume ddl jobs " + jobID)
+// }
+// }
+// hook.OnJobUpdatedExported.Store(&resumeFunc)
+// dom.DDL().SetHook(hook.Clone())
+// require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/pkg/ddl/pauseAfterDistTaskFinished", "1*return(true)"))
+// tk.MustExec("alter table t add index idx3(a);")
+// tk.MustExec("admin check table t;")
+// require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/pauseAfterDistTaskFinished"))
+// require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/pkg/ddl/syncDDLTaskPause"))

+// tk.MustExec(`set global tidb_enable_dist_task=0;`)
+// }
diff --git a/tests/realtikvtest/addindextest1/main_test.go b/tests/realtikvtest/addindextest1/main_test.go
new file mode 100644
index 0000000000000..30ba7d0424a87
--- /dev/null
+++ b/tests/realtikvtest/addindextest1/main_test.go
@@ -0,0 +1,34 @@
+// Copyright 2023 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package addindextest
+
+import (
+ "flag"
+ "testing"
+
+ "github.com/pingcap/tidb/pkg/config"
+ "github.com/pingcap/tidb/tests/realtikvtest"
+)
+
+// FullMode is a flag identifying whether the tests should run in full mode.
+// In full mode, the test will run all the cases.
+var FullMode = flag.Bool("full-mode", false, "whether tests run in full mode") + +func TestMain(m *testing.M) { + config.UpdateGlobal(func(conf *config.Config) { + conf.Store = "tikv" + }) + realtikvtest.RunTestMain(m) +} diff --git a/tests/realtikvtest/addindextest2/BUILD.bazel b/tests/realtikvtest/addindextest2/BUILD.bazel new file mode 100644 index 0000000000000..9caa91fe19679 --- /dev/null +++ b/tests/realtikvtest/addindextest2/BUILD.bazel @@ -0,0 +1,26 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "addindextest2_test", + timeout = "long", + srcs = [ + "global_sort_test.go", + "main_test.go", + ], + flaky = True, + deps = [ + "//br/pkg/lightning/backend/external", + "//br/pkg/storage", + "//pkg/config", + "//pkg/ddl/util/callback", + "//pkg/disttask/framework/dispatcher", + "//pkg/parser/model", + "//pkg/sessionctx/variable", + "//pkg/testkit", + "//tests/realtikvtest", + "@com_github_fsouza_fake_gcs_server//fakestorage", + "@com_github_phayes_freeport//:freeport", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + ], +) diff --git a/tests/realtikvtest/addindextest/global_sort_test.go b/tests/realtikvtest/addindextest2/global_sort_test.go similarity index 97% rename from tests/realtikvtest/addindextest/global_sort_test.go rename to tests/realtikvtest/addindextest2/global_sort_test.go index 7608ece94117f..7669693fb1b1c 100644 --- a/tests/realtikvtest/addindextest/global_sort_test.go +++ b/tests/realtikvtest/addindextest2/global_sort_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package addindextest_test +package addindextest import ( "context" @@ -26,6 +26,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/backend/external" "github.com/pingcap/tidb/br/pkg/storage" + "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/ddl/util/callback" "github.com/pingcap/tidb/pkg/disttask/framework/dispatcher" "github.com/pingcap/tidb/pkg/parser/model" @@ -35,6 +36,12 @@ import ( "github.com/stretchr/testify/require" ) +func init() { + config.UpdateGlobal(func(conf *config.Config) { + conf.Path = "127.0.0.1:2379" + }) +} + func genStorageURI(t *testing.T) (host string, port uint16, uri string) { gcsHost := "127.0.0.1" // for fake gcs server, we must use this endpoint format diff --git a/tests/realtikvtest/addindextest2/main_test.go b/tests/realtikvtest/addindextest2/main_test.go new file mode 100644 index 0000000000000..30ba7d0424a87 --- /dev/null +++ b/tests/realtikvtest/addindextest2/main_test.go @@ -0,0 +1,34 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package addindextest + +import ( + "flag" + "testing" + + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/tests/realtikvtest" +) + +// FullMode is a flag identify it should be run in full mode. +// In full mode, the test will run all the cases. 
+var FullMode = flag.Bool("full-mode", false, "whether tests run in full mode") + +func TestMain(m *testing.M) { + config.UpdateGlobal(func(conf *config.Config) { + conf.Store = "tikv" + }) + realtikvtest.RunTestMain(m) +} diff --git a/tests/realtikvtest/addindextest3/BUILD.bazel b/tests/realtikvtest/addindextest3/BUILD.bazel new file mode 100644 index 0000000000000..a8251228ef5ba --- /dev/null +++ b/tests/realtikvtest/addindextest3/BUILD.bazel @@ -0,0 +1,31 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "addindextest3_test", + timeout = "short", + srcs = [ + "main_test.go", + "operator_test.go", + ], + flaky = True, + deps = [ + "//pkg/config", + "//pkg/ddl", + "//pkg/ddl/copr", + "//pkg/ddl/ingest", + "//pkg/disttask/operator", + "//pkg/domain", + "//pkg/kv", + "//pkg/parser/model", + "//pkg/sessionctx", + "//pkg/table", + "//pkg/table/tables", + "//pkg/testkit", + "//pkg/util/chunk", + "//tests/realtikvtest", + "@com_github_ngaut_pools//:pools", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//require", + "@org_golang_x_sync//errgroup", + ], +) diff --git a/tests/realtikvtest/addindextest3/main_test.go b/tests/realtikvtest/addindextest3/main_test.go new file mode 100644 index 0000000000000..30ba7d0424a87 --- /dev/null +++ b/tests/realtikvtest/addindextest3/main_test.go @@ -0,0 +1,34 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package addindextest + +import ( + "flag" + "testing" + + "github.com/pingcap/tidb/pkg/config" + "github.com/pingcap/tidb/tests/realtikvtest" +) + +// FullMode is a flag identify it should be run in full mode. +// In full mode, the test will run all the cases. 
+var FullMode = flag.Bool("full-mode", false, "whether tests run in full mode") + +func TestMain(m *testing.M) { + config.UpdateGlobal(func(conf *config.Config) { + conf.Store = "tikv" + }) + realtikvtest.RunTestMain(m) +} diff --git a/tests/realtikvtest/addindextest/operator_test.go b/tests/realtikvtest/addindextest3/operator_test.go similarity index 98% rename from tests/realtikvtest/addindextest/operator_test.go rename to tests/realtikvtest/addindextest3/operator_test.go index 2eb087c381a92..8da0a6a886106 100644 --- a/tests/realtikvtest/addindextest/operator_test.go +++ b/tests/realtikvtest/addindextest3/operator_test.go @@ -22,6 +22,7 @@ import ( "github.com/ngaut/pools" "github.com/pingcap/failpoint" + "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/copr" "github.com/pingcap/tidb/pkg/ddl/ingest" @@ -39,6 +40,12 @@ import ( "golang.org/x/sync/errgroup" ) +func init() { + config.UpdateGlobal(func(conf *config.Config) { + conf.Path = "127.0.0.1:2379" + }) +} + func TestBackfillOperators(t *testing.T) { store, dom := realtikvtest.CreateMockStoreAndDomainAndSetup(t) tk := testkit.NewTestKit(t, store) diff --git a/tests/realtikvtest/addindextest4/BUILD.bazel b/tests/realtikvtest/addindextest4/BUILD.bazel new file mode 100644 index 0000000000000..a5978813afa80 --- /dev/null +++ b/tests/realtikvtest/addindextest4/BUILD.bazel @@ -0,0 +1,27 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_test") + +go_test( + name = "addindextest4_test", + timeout = "short", + srcs = [ + "ingest_test.go", + "main_test.go", + ], + flaky = True, + deps = [ + "//br/pkg/lightning/backend/local", + "//pkg/config", + "//pkg/ddl", + "//pkg/ddl/ingest", + "//pkg/ddl/testutil", + "//pkg/ddl/util/callback", + "//pkg/errno", + "//pkg/parser/model", + "//pkg/sessionctx/variable", + "//pkg/testkit", + "//tests/realtikvtest", + "@com_github_pingcap_failpoint//:failpoint", + "@com_github_stretchr_testify//assert", + "@com_github_stretchr_testify//require", + ], +) diff --git a/tests/realtikvtest/addindextest/integration_test.go b/tests/realtikvtest/addindextest4/ingest_test.go similarity index 99% rename from tests/realtikvtest/addindextest/integration_test.go rename to tests/realtikvtest/addindextest4/ingest_test.go index b42fc23cc1192..cd01025a33f58 100644 --- a/tests/realtikvtest/addindextest/integration_test.go +++ b/tests/realtikvtest/addindextest4/ingest_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 PingCAP, Inc. +// Copyright 2023 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -24,6 +24,7 @@ import ( "github.com/pingcap/failpoint" "github.com/pingcap/tidb/br/pkg/lightning/backend/local" + "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/ddl" "github.com/pingcap/tidb/pkg/ddl/ingest" "github.com/pingcap/tidb/pkg/ddl/testutil" @@ -37,6 +38,12 @@ import ( "github.com/stretchr/testify/require" ) +func init() { + config.UpdateGlobal(func(conf *config.Config) { + conf.Path = "127.0.0.1:2379" + }) +} + func TestAddIndexIngestMemoryUsage(t *testing.T) { store := realtikvtest.CreateMockStoreAndSetup(t) tk := testkit.NewTestKit(t, store) diff --git a/tests/realtikvtest/addindextest4/main_test.go b/tests/realtikvtest/addindextest4/main_test.go new file mode 100644 index 0000000000000..30ba7d0424a87 --- /dev/null +++ b/tests/realtikvtest/addindextest4/main_test.go @@ -0,0 +1,34 @@ +// Copyright 2023 PingCAP, Inc. 
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package addindextest
+
+import (
+ "flag"
+ "testing"
+
+ "github.com/pingcap/tidb/pkg/config"
+ "github.com/pingcap/tidb/tests/realtikvtest"
+)
+
+// FullMode is a flag identifying whether the tests should run in full mode.
+// In full mode, the test will run all the cases.
+var FullMode = flag.Bool("full-mode", false, "whether tests run in full mode")
+
+func TestMain(m *testing.M) {
+ config.UpdateGlobal(func(conf *config.Config) {
+ conf.Store = "tikv"
+ })
+ realtikvtest.RunTestMain(m)
+}
diff --git a/tests/realtikvtest/brietest/main_test.go b/tests/realtikvtest/brietest/main_test.go
index cb7011d295488..91ad1d36f2ece 100644
--- a/tests/realtikvtest/brietest/main_test.go
+++ b/tests/realtikvtest/brietest/main_test.go
@@ -30,6 +30,7 @@ func TestMain(m *testing.M) {
 goleak.IgnoreTopFunction("google.golang.org/grpc.(*ccBalancerWrapper).watcher"),
 goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*http2Client).keepalive"),
 goleak.IgnoreTopFunction("google.golang.org/grpc/internal/transport.(*controlBuffer).get"),
+ goleak.IgnoreTopFunction("google.golang.org/grpc/internal/grpcsync.(*CallbackSerializer).run"),
 goleak.IgnoreTopFunction("net/http.(*persistConn).writeLoop"),
 goleak.IgnoreTopFunction("internal/poll.runtime_pollWait"),
 }
diff --git a/tests/realtikvtest/importintotest/BUILD.bazel b/tests/realtikvtest/importintotest/BUILD.bazel
index 64abe375bc580..5b25bb7e2d739 100644
--- a/tests/realtikvtest/importintotest/BUILD.bazel
+++ b/tests/realtikvtest/importintotest/BUILD.bazel
@@ -5,7 +5,6 @@ go_test(
 timeout = "long",
 srcs = [
 "detach_test.go",
- "from_server_test.go",
 "import_into_test.go",
 "job_test.go",
 "main_test.go",
diff --git a/tests/realtikvtest/importintotest/from_server_test.go b/tests/realtikvtest/importintotest/from_server_test.go
deleted file mode 100644
index 8ac7597a4c49d..0000000000000
--- a/tests/realtikvtest/importintotest/from_server_test.go
+++ /dev/null
@@ -1,90 +0,0 @@
-// Copyright 2023 PingCAP, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
- -package importintotest - -import ( - "fmt" - "os" - "path" - - "github.com/pingcap/tidb/pkg/testkit" - "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" -) - -func (s *mockGCSSuite) TestImportFromServer() { - tempDir := s.T().TempDir() - var allData []string - for i := 0; i < 3; i++ { - fileName := fmt.Sprintf("server-%d.csv", i) - var content []byte - rowCnt := 2 - for j := 0; j < rowCnt; j++ { - content = append(content, []byte(fmt.Sprintf("%d,test-%d\n", i*rowCnt+j, i*rowCnt+j))...) - allData = append(allData, fmt.Sprintf("%d test-%d", i*rowCnt+j, i*rowCnt+j)) - } - s.NoError(os.WriteFile(path.Join(tempDir, fileName), content, 0o644)) - } - // directory without permission - s.NoError(os.MkdirAll(path.Join(tempDir, "no-perm"), 0o700)) - s.NoError(os.WriteFile(path.Join(tempDir, "no-perm", "no-perm.csv"), []byte("1,1"), 0o644)) - s.NoError(os.Chmod(path.Join(tempDir, "no-perm"), 0o000)) - s.T().Cleanup(func() { - // make sure TempDir RemoveAll cleanup works - _ = os.Chmod(path.Join(tempDir, "no-perm"), 0o700) - }) - // file without permission - s.NoError(os.WriteFile(path.Join(tempDir, "no-perm.csv"), []byte("1,1"), 0o644)) - s.NoError(os.Chmod(path.Join(tempDir, "no-perm.csv"), 0o000)) - - s.prepareAndUseDB("from_server") - s.tk.MustExec("create table t (a bigint, b varchar(100));") - - // relative path - err2 := s.tk.QueryToErr("IMPORT INTO t FROM '~/file.csv'") - s.ErrorIs(err2, exeerrors.ErrLoadDataInvalidURI) - s.ErrorContains(err2, "URI of data source is invalid") - // no suffix or wrong suffix - s.ErrorIs(s.tk.QueryToErr("IMPORT INTO t FROM '/file'"), exeerrors.ErrLoadDataInvalidURI) - s.ErrorIs(s.tk.QueryToErr("IMPORT INTO t FROM '/file.txt'"), exeerrors.ErrLoadDataInvalidURI) - // non-exist parent directory - err := s.tk.QueryToErr("IMPORT INTO t FROM '/path/to/non/exists/file.csv'") - s.ErrorIs(err, exeerrors.ErrLoadDataInvalidURI) - s.ErrorContains(err, "no such file or directory") - // without permission to parent dir - err = s.tk.QueryToErr(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "no-perm", "no-perm.csv"))) - s.ErrorIs(err, exeerrors.ErrLoadDataCantRead) - s.ErrorContains(err, "permission denied") - // file not exists - err = s.tk.QueryToErr(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "not-exists.csv"))) - s.ErrorIs(err, exeerrors.ErrLoadDataCantRead) - s.ErrorContains(err, "no such file or directory") - // file without permission - err = s.tk.QueryToErr(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "no-perm.csv"))) - s.ErrorIs(err, exeerrors.ErrLoadDataCantRead) - s.ErrorContains(err, "permission denied") - - s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "server-0.csv"))) - s.tk.MustQuery("SELECT * FROM t;").Sort().Check(testkit.Rows([]string{"0 test-0", "1 test-1"}...)) - - s.tk.MustExec("truncate table t") - // we don't have read access to 'no-perm' directory, so walk-dir fails - err = s.tk.QueryToErr(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "server-*.csv"))) - s.ErrorIs(err, exeerrors.ErrLoadDataCantRead) - s.ErrorContains(err, "permission denied") - - s.NoError(os.Chmod(path.Join(tempDir, "no-perm"), 0o400)) - s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "server-*.csv"))) - s.tk.MustQuery("SELECT * FROM t;").Sort().Check(testkit.Rows(allData...)) -} diff --git a/tests/realtikvtest/importintotest/import_into_test.go b/tests/realtikvtest/importintotest/import_into_test.go index 50967db18d1a3..95519e0f81378 100644 --- 
a/tests/realtikvtest/importintotest/import_into_test.go +++ b/tests/realtikvtest/importintotest/import_into_test.go @@ -655,7 +655,9 @@ func (s *mockGCSSuite) TestMaxWriteSpeed() { start := time.Now() sql := fmt.Sprintf(`IMPORT INTO load_test_write_speed.t FROM 'gs://test-load/speed-test.csv?endpoint=%s'`, gcsEndpoint) - s.tk.MustQuery(sql) + result := s.tk.MustQuery(sql) + fileSize := result.Rows()[0][6].(string) + s.Equal("7.598KiB", fileSize) duration := time.Since(start).Seconds() s.tk.MustQuery("SELECT count(1) FROM load_test_write_speed.t;").Check(testkit.Rows( strconv.Itoa(lineCount), diff --git a/tests/realtikvtest/importintotest/job_test.go b/tests/realtikvtest/importintotest/job_test.go index b2a5a49dc6b55..cc9e6e796f09e 100644 --- a/tests/realtikvtest/importintotest/job_test.go +++ b/tests/realtikvtest/importintotest/job_test.go @@ -57,7 +57,7 @@ func (s *mockGCSSuite) compareJobInfoWithoutTime(jobInfo *importer.JobInfo, row s.Equal(strconv.Itoa(int(jobInfo.TableID)), row[3]) s.Equal(jobInfo.Step, row[4]) s.Equal(jobInfo.Status, row[5]) - s.Equal(units.HumanSize(float64(jobInfo.SourceFileSize)), row[6]) + s.Equal(units.BytesSize(float64(jobInfo.SourceFileSize)), row[6]) if jobInfo.Summary == nil { s.Equal("", row[7].(string)) } else { diff --git a/tests/realtikvtest/importintotest3/BUILD.bazel b/tests/realtikvtest/importintotest3/BUILD.bazel index bc7bc64b036c7..36bd3fb77d969 100644 --- a/tests/realtikvtest/importintotest3/BUILD.bazel +++ b/tests/realtikvtest/importintotest3/BUILD.bazel @@ -5,6 +5,7 @@ go_test( timeout = "moderate", srcs = [ "file_compression_test.go", + "from_server_test.go", "main_test.go", ], flaky = True, diff --git a/tests/realtikvtest/importintotest3/from_server_test.go b/tests/realtikvtest/importintotest3/from_server_test.go new file mode 100644 index 0000000000000..be525dd868391 --- /dev/null +++ b/tests/realtikvtest/importintotest3/from_server_test.go @@ -0,0 +1,58 @@ +// Copyright 2023 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package importintotest + +import ( + "fmt" + "os" + "path" + + "github.com/pingcap/tidb/br/pkg/lightning/mydump" + "github.com/pingcap/tidb/pkg/testkit" +) + +func (s *mockGCSSuite) TestImportFromServer() { + tempDir := s.T().TempDir() + var allData []string + for i := 0; i < 3; i++ { + fileName := fmt.Sprintf("server-%d.csv", i) + var content []byte + rowCnt := 2 + for j := 0; j < rowCnt; j++ { + content = append(content, []byte(fmt.Sprintf("%d,test-%d\n", i*rowCnt+j, i*rowCnt+j))...) 
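+ // record the expected "a b" row so the single-file and wildcard imports below can be verified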
+ allData = append(allData, fmt.Sprintf("%d test-%d", i*rowCnt+j, i*rowCnt+j)) + } + s.NoError(os.WriteFile(path.Join(tempDir, fileName), content, 0o644)) + } + + s.prepareAndUseDB("from_server") + s.tk.MustExec("create table t (a bigint, b varchar(100));") + + s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "server-0.csv"))) + s.tk.MustQuery("SELECT * FROM t;").Sort().Check(testkit.Rows([]string{"0 test-0", "1 test-1"}...)) + + s.tk.MustExec("truncate table t") + s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "server-*.csv"))) + s.tk.MustQuery("SELECT * FROM t;").Sort().Check(testkit.Rows(allData...)) + + // try a gzip file + s.NoError(os.WriteFile( + path.Join(tempDir, "test.csv.gz"), + s.getCompressedData(mydump.CompressionGZ, []byte("1,test1\n2,test2")), + 0o644)) + s.tk.MustExec("truncate table t") + s.tk.MustQuery(fmt.Sprintf("IMPORT INTO t FROM '%s'", path.Join(tempDir, "test.csv.gz"))) + s.tk.MustQuery("SELECT * FROM t;").Sort().Check(testkit.Rows([]string{"1 test1", "2 test2"}...)) +} diff --git a/tests/realtikvtest/pessimistictest/BUILD.bazel b/tests/realtikvtest/pessimistictest/BUILD.bazel index 7810a11bc791f..f2f71713d2670 100644 --- a/tests/realtikvtest/pessimistictest/BUILD.bazel +++ b/tests/realtikvtest/pessimistictest/BUILD.bazel @@ -31,7 +31,9 @@ go_test( "//pkg/testkit/external", "//pkg/types", "//pkg/util/codec", + "//pkg/util/dbterror/exeerrors", "//pkg/util/deadlockhistory", + "//pkg/util/sqlkiller", "//tests/realtikvtest", "@com_github_pingcap_errors//:errors", "@com_github_pingcap_failpoint//:failpoint", diff --git a/tests/realtikvtest/pessimistictest/pessimistic_test.go b/tests/realtikvtest/pessimistictest/pessimistic_test.go index 09df963b08d55..886669e5dd3b3 100644 --- a/tests/realtikvtest/pessimistictest/pessimistic_test.go +++ b/tests/realtikvtest/pessimistictest/pessimistic_test.go @@ -48,7 +48,9 @@ import ( "github.com/pingcap/tidb/pkg/testkit/external" "github.com/pingcap/tidb/pkg/types" "github.com/pingcap/tidb/pkg/util/codec" + "github.com/pingcap/tidb/pkg/util/dbterror/exeerrors" "github.com/pingcap/tidb/pkg/util/deadlockhistory" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/tests/realtikvtest" "github.com/stretchr/testify/require" "github.com/tikv/client-go/v2/oracle" @@ -287,10 +289,10 @@ func TestSingleStatementRollback(t *testing.T) { tableStart := tablecodec.GenTableRecordPrefix(tblID) cluster.SplitKeys(tableStart, tableStart.PrefixNext(), 2) region1Key := codec.EncodeBytes(nil, tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(1))) - region1, _, _ := cluster.GetRegionByKey(region1Key) + region1, _, _, _ := cluster.GetRegionByKey(region1Key) region1ID := region1.Id region2Key := codec.EncodeBytes(nil, tablecodec.EncodeRowKeyWithHandle(tblID, kv.IntHandle(3))) - region2, _, _ := cluster.GetRegionByKey(region2Key) + region2, _, _, _ := cluster.GetRegionByKey(region2Key) region2ID := region2.Id syncCh := make(chan bool) @@ -728,8 +730,8 @@ func TestWaitLockKill(t *testing.T) { go func() { time.Sleep(500 * time.Millisecond) sessVars := tk2.Session().GetSessionVars() - succ := atomic.CompareAndSwapUint32(&sessVars.Killed, 0, 1) - require.True(t, succ) + sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) + require.True(t, exeerrors.ErrQueryInterrupted.Equal(sessVars.SQLKiller.HandleSignal())) // Send success. 
wg.Wait() }() _, err := tk2.Exec("update test_kill set c = c + 1 where id = 1") @@ -756,13 +758,12 @@ func TestKillStopTTLManager(t *testing.T) { tk2.MustExec("begin pessimistic") tk.MustQuery("select * from test_kill where id = 1 for update") sessVars := tk.Session().GetSessionVars() - succ := atomic.CompareAndSwapUint32(&sessVars.Killed, 0, 1) - require.True(t, succ) + sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) + require.True(t, exeerrors.ErrQueryInterrupted.Equal(sessVars.SQLKiller.HandleSignal())) // Send success. // This query should success rather than returning a ResolveLock error. tk2.MustExec("update test_kill set c = c + 1 where id = 1") - succ = atomic.CompareAndSwapUint32(&sessVars.Killed, 1, 0) - require.True(t, succ) + sessVars.SQLKiller.Reset() tk.MustExec("rollback") tk2.MustExec("rollback") } @@ -1716,13 +1717,13 @@ func TestKillWaitLockTxn(t *testing.T) { time.Sleep(100 * time.Millisecond) sessVars := tk.Session().GetSessionVars() // lock query in tk is killed, the ttl manager will stop - succ := atomic.CompareAndSwapUint32(&sessVars.Killed, 0, 1) - require.True(t, succ) + sessVars.SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) + require.True(t, exeerrors.ErrQueryInterrupted.Equal(sessVars.SQLKiller.HandleSignal())) // Send success. err := <-errCh require.NoError(t, err) _, _ = tk.Exec("rollback") // reset kill - atomic.CompareAndSwapUint32(&sessVars.Killed, 1, 0) + sessVars.SQLKiller.Reset() tk.MustExec("rollback") tk2.MustExec("rollback") } diff --git a/tests/realtikvtest/sessiontest/BUILD.bazel b/tests/realtikvtest/sessiontest/BUILD.bazel index 579b68fd5d194..e87a99c3ff1ee 100644 --- a/tests/realtikvtest/sessiontest/BUILD.bazel +++ b/tests/realtikvtest/sessiontest/BUILD.bazel @@ -22,6 +22,7 @@ go_test( "//pkg/sessionctx/variable", "//pkg/testkit", "//pkg/util", + "//pkg/util/sqlkiller", "//tests/realtikvtest", "@com_github_pingcap_failpoint//:failpoint", "@com_github_stretchr_testify//require", diff --git a/tests/realtikvtest/sessiontest/session_fail_test.go b/tests/realtikvtest/sessiontest/session_fail_test.go index 8c0952eb4fb59..f56bc952c7483 100644 --- a/tests/realtikvtest/sessiontest/session_fail_test.go +++ b/tests/realtikvtest/sessiontest/session_fail_test.go @@ -19,12 +19,14 @@ import ( "fmt" "strconv" "testing" + "time" "github.com/pingcap/failpoint" "github.com/pingcap/tidb/pkg/config" "github.com/pingcap/tidb/pkg/session" "github.com/pingcap/tidb/pkg/sessionctx/variable" "github.com/pingcap/tidb/pkg/testkit" + "github.com/pingcap/tidb/pkg/util/sqlkiller" "github.com/pingcap/tidb/tests/realtikvtest" "github.com/stretchr/testify/require" ) @@ -109,10 +111,13 @@ func TestKillFlagInBackoff(t *testing.T) { tk.Session().GetSessionVars().EnableClusteredIndex = variable.ClusteredIndexDefModeOn tk.MustExec("create table kill_backoff (id int)") // Inject 1 time timeout. If `Killed` is not successfully passed, it will retry and complete query. - require.NoError(t, failpoint.Enable("tikvclient/tikvStoreSendReqResult", `return("timeout")->return("")`)) + require.NoError(t, failpoint.Enable("tikvclient/tikvStoreSendReqResult", `sleep(1000)->return("timeout")->return("")`)) defer failpoint.Disable("tikvclient/tikvStoreSendReqResult") // Set kill flag and check its passed to backoffer. 
- tk.Session().GetSessionVars().Killed = 1 + go func() { + time.Sleep(300 * time.Millisecond) + tk.Session().GetSessionVars().SQLKiller.SendKillSignal(sqlkiller.QueryInterrupted) + }() rs, err := tk.Exec("select * from kill_backoff") require.NoError(t, err) _, err = session.ResultSetToStringSlice(context.TODO(), tk.Session(), rs)