diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml new file mode 100644 index 0000000000..9820059e6d --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler.yml @@ -0,0 +1,140 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_scheduler) +on: + pull_request: + workflow_dispatch: + push: + branches: + - main +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler)') + cancel-in-progress: true + +env: + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend OnlineDDL tests on Cluster + runs-on: ubuntu-22.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_onlineddl_scheduler.yml' + - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v3 + with: + go-version: 1.20.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + # Install everything else we need, and configure + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + source build.env + + set -x + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + + # print test output + cat output.txt + - name: Check test results + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # check if any test failed + if grep -q -- '--- FAIL' output.txt; then + echo "Testcase is failed" + exit 1 + elif grep -q -- 'FAIL vitess.io' output.txt; then + echo "Testcase is failed" + exit 1 + else + echo "Testcase is successful" + fi diff --git a/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml new file mode 100644 index 0000000000..c100e8070f --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml @@ -0,0 +1,151 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_scheduler) mysql57 +on: +# pull_request: + workflow_dispatch: + push: + branches: + - main +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_scheduler) mysql57') + cancel-in-progress: true + +env: + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (onlineddl_scheduler) mysql57 + runs-on: ubuntu-22.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + 
if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_onlineddl_scheduler_mysql57.yml' + - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v3 + with: + go-version: 1.20.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo apt-get update + + # Uninstall any previously installed MySQL first + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 + # packages for Jammy. 
+ echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections + echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -x + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard onlineddl_scheduler | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # print test output + cat output.txt + + - name: Check test results + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # check if any test failed + if grep -q -- '--- FAIL' output.txt; then + echo "Testcase is failed" + exit 1 + elif grep -q -- 'FAIL vitess.io' output.txt; then + echo "Testcase is failed" + exit 1 + else + echo "Testcase is successful" + fi \ No newline at end of file diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml 
new file mode 100644 index 0000000000..c15c0d99e1 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -0,0 +1,141 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl) +on: +# pull_request: + workflow_dispatch: + push: + branches: + - main +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl)') + cancel-in-progress: true + +env: + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (onlineddl_vrepl) + runs-on: ubuntu-22.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_onlineddl_vrepl.yml' + - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v3 + with: + go-version: 1.20.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + # Install everything else we need, and configure + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata xz-utils libncurses5 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + source build.env + + set -x + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + + # print test output + cat output.txt + + - name: Check test results + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # check if any test failed + if grep -q -- '--- FAIL' output.txt; then + echo "Testcase is failed" + exit 1 + elif grep -q -- 'FAIL vitess.io' output.txt; then + echo "Testcase is failed" + exit 1 + else + echo "Testcase is successful" + fi \ No newline at end of file diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml new file mode 100644 index 0000000000..a4e07b1bb2 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml @@ -0,0 +1,150 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl) mysql57 +on: + pull_request: + workflow_dispatch: + push: + branches: + - main +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'onlineddl mysql57') + cancel-in-progress: true + +env: + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend OnlineDDL mysql57 tests on Cluster + runs-on: ubuntu-22.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ 
"${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@v3 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: frouioui/paths-filter@main + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_onlineddl_vrepl_mysql57.yml' + - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@v3 + with: + go-version: 1.20.1 + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@v4 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. 
More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + sudo apt-get update + + # Uninstall any previously installed MySQL first + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 467B942D3A79BD29 + + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.24-1_all.deb + # Bionic packages are still compatible for Jammy since there's no MySQL 5.7 + # packages for Jammy. 
+ echo mysql-apt-config mysql-apt-config/repo-codename select bionic | sudo debconf-set-selections + echo mysql-apt-config mysql-apt-config/select-server select mysql-5.7 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get update + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-client=5.7* mysql-community-server=5.7* mysql-server=5.7* libncurses5 + + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. + export VTDATAROOT="/tmp/" + source build.env + + set -x + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # print test output + cat output.txt + - name: Check test results + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + # check if any test failed + if grep -q -- '--- FAIL' output.txt; then + echo "Testcase is failed" + exit 1 + elif grep -q -- 'FAIL vitess.io' output.txt; then + echo "Testcase is failed" + exit 1 + else + echo "Testcase is successful" + fi diff --git a/.github/workflows/cluster_endtoend_wesql.yml b/.github/workflows/cluster_endtoend_wesql.yml index 9914cedbe9..39d095bca4 100644 --- 
a/.github/workflows/cluster_endtoend_wesql.yml +++ b/.github/workflows/cluster_endtoend_wesql.yml @@ -120,7 +120,6 @@ jobs: # run the tests however you normally do, then produce a JUnit XML file eatmydata -- go run test.go -docker=false -follow -shard wesql | tee -a output.txt | go-junit-report -set-exit-code > report.xml - make failpoint-disable - name: Print test output and Record test result diff --git a/Makefile b/Makefile index a727577f32..e4cc53de5a 100644 --- a/Makefile +++ b/Makefile @@ -208,6 +208,11 @@ unit_test: build dependency_check e2e_test: build tools/wesql_cluster_test.sh +e2e_test_scheduler: build + tools/wesql_onlineddl_scheduler.sh + +e2e_test_vrepl: build + tools/wesql_onlineddl_vrepl.sh .ONESHELL: SHELL = /bin/bash diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index 875ac646d2..73b403ef3e 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -1,3 +1,8 @@ +/* +Copyright ApeCloud, Inc. +Licensed under the Apache v2(found in the LICENSE file in the root directory). +*/ + /* Copyright 2021 The Vitess Authors. 
@@ -76,7 +81,7 @@ var ( ensureStateNotChangedTime = 5 * time.Second hostname = "localhost" - keyspaceName = "ks" + keyspaceName = "mysql" cell = "zone1" schemaChangeDirectory = "" overrideVtctlParams *cluster.VtctlClientParams @@ -205,7 +210,7 @@ func TestMain(m *testing.M) { } // No need for replicas in this stress test - if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + if err := clusterInstance.StartKeyspace(*keyspace, []string{"0"}, 0, false); err != nil { return 1, err } @@ -217,8 +222,9 @@ func TestMain(m *testing.M) { // ensure it is torn down during cluster TearDown clusterInstance.VtgateProcess = *vtgateInstance vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + DbName: "mysql", } return m.Run(), nil @@ -234,12 +240,12 @@ func TestMain(m *testing.M) { func TestSchemaChange(t *testing.T) { t.Run("scheduler", testScheduler) - t.Run("singleton", testSingleton) - t.Run("declarative", testDeclarative) - t.Run("foreign-keys", testForeignKeys) - t.Run("summary: validate sequential migration IDs", func(t *testing.T) { - onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) - }) + //t.Run("singleton", testSingleton) + //t.Run("declarative", testDeclarative) + //t.Run("foreign-keys", testForeignKeys) + //t.Run("summary: validate sequential migration IDs", func(t *testing.T) { + // onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) + //}) } func testScheduler(t *testing.T) { @@ -272,7 +278,7 @@ func testScheduler(t *testing.T) { mysqlVersion := onlineddl.GetMySQLVersion(t, clusterInstance.Keyspaces[0].Shards[0].PrimaryTablet()) require.NotEmpty(t, mysqlVersion) - _, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil) + //_, capableOf, _ := mysql.GetFlavor(mysqlVersion, nil) var ( t1uuid string @@ -319,12 +325,12 @@ func testScheduler(t *testing.T) { dropT4Statement = ` DROP 
TABLE IF EXISTS t4_test ` - alterExtraColumn = ` - ALTER TABLE t1_test ADD COLUMN extra_column int NOT NULL DEFAULT 0 - ` - createViewDependsOnExtraColumn = ` - CREATE VIEW t1_test_view AS SELECT id, extra_column FROM t1_test - ` + //alterExtraColumn = ` + // ALTER TABLE t1_test ADD COLUMN extra_column int NOT NULL DEFAULT 0 + //` + //createViewDependsOnExtraColumn = ` + // CREATE VIEW t1_test_view AS SELECT id, extra_column FROM t1_test + //` ) testReadTimestamp := func(t *testing.T, uuid string, timestampColumn string) (timestamp string) { @@ -425,29 +431,6 @@ func testScheduler(t *testing.T) { } onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusQueued) }) - t.Run("launch irrelevant shards", func(t *testing.T) { - onlineddl.CheckLaunchMigration(t, &vtParams, shards, t1uuid, "x,y,z", false) - time.Sleep(2 * time.Second) - rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - postponeLaunch := row.AsInt64("postpone_launch", 0) - assert.Equal(t, int64(1), postponeLaunch) - } - onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusQueued) - }) - t.Run("launch relevant shard", func(t *testing.T) { - onlineddl.CheckLaunchMigration(t, &vtParams, shards, t1uuid, "x, y, 1", true) - rs := onlineddl.ReadMigrations(t, &vtParams, t1uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - postponeLaunch := row.AsInt64("postpone_launch", 0) - assert.Equal(t, int64(0), postponeLaunch) - } - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) - }) }) t.Run("ALTER both tables non-concurrent", func(t *testing.T) { t1uuid = testOnlineDDLStatement(t, 
createParams(trivialAlterT1Statement, ddlStrategy, "vtgate", "", "", true)) // skip wait @@ -657,6 +640,7 @@ func testScheduler(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) }) }) + t.Run("non-concurrent REVERT vs three concurrent drops", func(t *testing.T) { t1uuid = testRevertMigration(t, createRevertParams(t1uuid, ddlStrategy+" -postpone-completion", "vtgate", "", true)) drop3uuid := testOnlineDDLStatement(t, createParams(dropT3Statement, ddlStrategy+" -allow-concurrent", "vtgate", "", "", true)) // skip wait @@ -769,99 +753,99 @@ func testScheduler(t *testing.T) { }) }) - t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { - uuid := "00000000_1111_2222_3333_444444444444" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} - defer func() { overrideVtctlParams = nil }() - // create a migration and cancel it. We don't let it complete. We want it in "failed" state - t.Run("start and fail migration", func(t *testing.T) { - executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtctl", "", "", true)) // skip wait - require.Equal(t, uuid, executedUUID) - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning) - // let's cancel it - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) - }) - - // now, we submit the exact same migratoin again: same UUID, same migration context. 
- t.Run("resubmit migration", func(t *testing.T) { - executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait - require.Equal(t, uuid, executedUUID) - - // expect it to complete - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - retries := row.AsInt64("retries", 0) - assert.Greater(t, retries, int64(0)) - } - }) - }) - - t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { - uuid := "00000000_1111_3333_3333_444444444444" - ddlStrategy := ddlStrategy + " --singleton-context" - overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} - defer func() { overrideVtctlParams = nil }() - // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state - t.Run("start and fail migration", func(t *testing.T) { - executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion", "vtctl", "", "", true)) // skip wait - require.Equal(t, uuid, executedUUID) - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning) - // let's cancel it - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) - }) - - // now, we submit the exact same migratoin again: same UUID, same migration context. - t.Run("resubmit migration", func(t *testing.T) { - executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait - require.Equal(t, uuid, executedUUID) - - // expect it to complete - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - retries := row.AsInt64("retries", 0) - assert.Greater(t, retries, int64(0)) - } - }) - }) + //t.Run("Idempotent submission, retry failed migration", func(t *testing.T) { + // uuid := "00000000_1111_2222_3333_444444444444" + // overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, UUIDList: uuid, MigrationContext: "idempotent:1111-2222-3333"} + // defer func() { 
overrideVtctlParams = nil }() + // // create a migration and cancel it. We don't let it complete. We want it in "failed" state + // t.Run("start and fail migration", func(t *testing.T) { + // executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" -postpone-completion", "vtctl", "", "", true)) // skip wait + // require.Equal(t, uuid, executedUUID) + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + // // let's cancel it + // onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) + // }) + // + // // now, we submit the exact same migratoin again: same UUID, same migration context. 
+ // t.Run("resubmit migration", func(t *testing.T) { + // executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait + // require.Equal(t, uuid, executedUUID) + // + // // expect it to complete + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + // + // rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + // require.NotNil(t, rs) + // for _, row := range rs.Named().Rows { + // retries := row.AsInt64("retries", 0) + // assert.Greater(t, retries, int64(0)) + // } + // }) + //}) + // + //t.Run("Idempotent submission, retry failed migration in singleton context", func(t *testing.T) { + // uuid := "00000000_1111_3333_3333_444444444444" + // ddlStrategy := ddlStrategy + " --singleton-context" + // overrideVtctlParams = &cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true, UUIDList: uuid, MigrationContext: "idempotent:1111-3333-3333"} + // defer func() { overrideVtctlParams = nil }() + // // create a migration and cancel it. We don't let it complete. 
We want it in "failed" state + // t.Run("start and fail migration", func(t *testing.T) { + // executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy+" --postpone-completion", "vtctl", "", "", true)) // skip wait + // require.Equal(t, uuid, executedUUID) + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + // // let's cancel it + // onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusCancelled) + // }) + // + // // now, we submit the exact same migration again: same UUID, same migration context. + // t.Run("resubmit migration", func(t *testing.T) { + // executedUUID := testOnlineDDLStatement(t, createParams(trivialAlterT1Statement, ddlStrategy, "vtctl", "", "", true)) // skip wait + // require.Equal(t, uuid, executedUUID) + // + // // expect it to complete + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + // + // rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + // require.NotNil(t, rs) + // for _, row := range rs.Named().Rows { + // retries := row.AsInt64("retries", 0) + // assert.Greater(t, retries, int64(0)) + // } + // }) + //}) // INSTANT DDL - instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability) - require.NoError(t, err) - if instantDDLCapable { - t.Run("INSTANT DDL: postpone-completion", func(t *testing.T) { - t1uuid := 
testOnlineDDLStatement(t, createParams(instantAlterT1Statement, ddlStrategy+" --prefer-instant-ddl --postpone-completion", "vtgate", "", "", true)) - - t.Run("expect t1 queued", func(t *testing.T) { - // we want to validate that the migration remains queued even after some time passes. It must not move beyond 'queued' - time.Sleep(ensureStateNotChangedTime) - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) - }) - t.Run("complete t1", func(t *testing.T) { - // Issue a complete and wait for successful completion - onlineddl.CheckCompleteMigration(t, &vtParams, shards, t1uuid, true) - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) - }) - }) - } + //instantDDLCapable, err := capableOf(mysql.InstantAddLastColumnFlavorCapability) + //require.NoError(t, err) + //if instantDDLCapable { + //t.Run("INSTANT DDL: postpone-completion", func(t *testing.T) { + // t1uuid := testOnlineDDLStatement(t, createParams(instantAlterT1Statement, ddlStrategy+" --prefer-instant-ddl --postpone-completion", "vtgate", "", "", true)) + // + // t.Run("expect t1 queued", func(t *testing.T) { + // // we want to validate that the migration remains queued even after some time passes. 
It must not move beyond 'queued' + // time.Sleep(ensureStateNotChangedTime) + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) + // }) + // t.Run("complete t1", func(t *testing.T) { + // // Issue a complete and wait for successful completion + // onlineddl.CheckCompleteMigration(t, &vtParams, shards, t1uuid, true) + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) + // }) + //}) + //} // 'mysql' strategy t.Run("mysql strategy", func(t *testing.T) { t.Run("declarative", func(t *testing.T) { @@ -904,88 +888,88 @@ func testScheduler(t *testing.T) { }) }) // in-order-completion - t.Run("in-order-completion: multiple drops for nonexistent tables and views", func(t *testing.T) { - u, err := schema.CreateOnlineDDLUUID() - require.NoError(t, err) - - sqls := []string{ - fmt.Sprintf("drop table if exists t4_%s", u), - fmt.Sprintf("drop view if exists t1_%s", u), - fmt.Sprintf("drop table if exists t2_%s", u), - fmt.Sprintf("drop view if exists t3_%s", u), - } - sql := strings.Join(sqls, ";") - var vuuids []string - t.Run("drop multiple tables and views, in-order-completion", func(t *testing.T) { - uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait - vuuids = strings.Split(uuidList, "\n") - assert.Equal(t, 4, len(vuuids)) - for _, uuid := range vuuids { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, 
schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - } - }) - require.Equal(t, 4, len(vuuids)) - for i := range vuuids { - if i > 0 { - testTableCompletionTimes(t, vuuids[i-1], vuuids[i]) - } - } - }) - t.Run("in-order-completion: two new views, one depends on the other", func(t *testing.T) { - u, err := schema.CreateOnlineDDLUUID() - require.NoError(t, err) - v2name := fmt.Sprintf("v2_%s", u) - createv2 := fmt.Sprintf("create view %s as select id from t1_test", v2name) - v1name := fmt.Sprintf("v1_%s", u) - createv1 := fmt.Sprintf("create view %s as select id from %s", v1name, v2name) - - sql := fmt.Sprintf("%s; %s;", createv2, createv1) - var vuuids []string - t.Run("create two views, expect both complete", func(t *testing.T) { - uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait - vuuids = strings.Split(uuidList, "\n") - assert.Equal(t, 2, len(vuuids)) - for _, uuid := range vuuids { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - } - }) - require.Equal(t, 2, len(vuuids)) - testTableCompletionTimes(t, vuuids[0], vuuids[1]) - }) - t.Run("in-order-completion: new table column, new view depends on said column", func(t *testing.T) { - // The VIEW creation can only succeed when the ALTER has completed and the table has the new column - t1uuid = testOnlineDDLStatement(t, createParams(alterExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait - v1uuid := testOnlineDDLStatement(t, 
createParams(createViewDependsOnExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait - - testAllowConcurrent(t, "t1", t1uuid, 1) - testAllowConcurrent(t, "v1", v1uuid, 1) - t.Run("expect table running, expect view ready", func(t *testing.T) { - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) - time.Sleep(ensureStateNotChangedTime) - // nothing should change - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) - onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) - }) - t.Run("complete both", func(t *testing.T) { - onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards)*2) - }) - t.Run("expect table success", func(t *testing.T) { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) - }) - t.Run("expect view success", func(t *testing.T) { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) - fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, v1uuid, schema.OnlineDDLStatusComplete) - }) - testTableCompletionTimes(t, t1uuid, v1uuid) - }) + //t.Run("in-order-completion: multiple drops for nonexistent tables and views", func(t *testing.T) { + // u, err := schema.CreateOnlineDDLUUID() + // require.NoError(t, err) + 
// + // sqls := []string{ + // fmt.Sprintf("drop table if exists t4_%s", u), + // fmt.Sprintf("drop view if exists t1_%s", u), + // fmt.Sprintf("drop table if exists t2_%s", u), + // fmt.Sprintf("drop view if exists t3_%s", u), + // } + // sql := strings.Join(sqls, ";") + // var vuuids []string + // t.Run("drop multiple tables and views, in-order-completion", func(t *testing.T) { + // uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait + // vuuids = strings.Split(uuidList, "\n") + // assert.Equal(t, 4, len(vuuids)) + // for _, uuid := range vuuids { + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + // } + // }) + // require.Equal(t, 4, len(vuuids)) + // for i := range vuuids { + // if i > 0 { + // testTableCompletionTimes(t, vuuids[i-1], vuuids[i]) + // } + // } + //}) + //t.Run("in-order-completion: two new views, one depends on the other", func(t *testing.T) { + // u, err := schema.CreateOnlineDDLUUID() + // require.NoError(t, err) + // v2name := fmt.Sprintf("v2_%s", u) + // createv2 := fmt.Sprintf("create view %s as select id from t1_test", v2name) + // v1name := fmt.Sprintf("v1_%s", u) + // createv1 := fmt.Sprintf("create view %s as select id from %s", v1name, v2name) + // + // sql := fmt.Sprintf("%s; %s;", createv2, createv1) + // var vuuids []string + // t.Run("create two views, expect both complete", func(t *testing.T) { + // uuidList := testOnlineDDLStatement(t, createParams(sql, ddlStrategy+" --allow-concurrent --in-order-completion", "vtctl", "", "", true)) // skip wait + // vuuids = strings.Split(uuidList, "\n") + // assert.Equal(t, 2, len(vuuids)) + // for _, uuid := range vuuids { + 
// status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + // } + // }) + // require.Equal(t, 2, len(vuuids)) + // testTableCompletionTimes(t, vuuids[0], vuuids[1]) + //}) + //t.Run("in-order-completion: new table column, new view depends on said column", func(t *testing.T) { + // // The VIEW creation can only succeed when the ALTER has completed and the table has the new column + // t1uuid = testOnlineDDLStatement(t, createParams(alterExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait + // v1uuid := testOnlineDDLStatement(t, createParams(createViewDependsOnExtraColumn, ddlStrategy+" --allow-concurrent --postpone-completion --in-order-completion", "vtctl", "", "", true)) // skip wait + // + // testAllowConcurrent(t, "t1", t1uuid, 1) + // testAllowConcurrent(t, "v1", v1uuid, 1) + // t.Run("expect table running, expect view ready", func(t *testing.T) { + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) + // time.Sleep(ensureStateNotChangedTime) + // // nothing should change + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusRunning) + // onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusQueued, schema.OnlineDDLStatusReady) + // }) + // t.Run("complete both", func(t *testing.T) { + // onlineddl.CheckCompleteAllMigrations(t, &vtParams, len(shards)*2) + // }) + // t.Run("expect table success", func(t *testing.T) { + // status 
:= onlineddl.WaitForMigrationStatus(t, &vtParams, shards, t1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, t1uuid, schema.OnlineDDLStatusComplete) + // }) + // t.Run("expect view success", func(t *testing.T) { + // status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, v1uuid, normalWaitTime, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + // fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) + // onlineddl.CheckMigrationStatus(t, &vtParams, shards, v1uuid, schema.OnlineDDLStatusComplete) + // }) + // testTableCompletionTimes(t, t1uuid, v1uuid) + //}) } func testSingleton(t *testing.T) { diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 105f4177a7..c7440001c3 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -1,3 +1,8 @@ +/* +Copyright ApeCloud, Inc. +Licensed under the Apache v2(found in the LICENSE file in the root directory). +*/ + /* Copyright 2019 The Vitess Authors. 
@@ -27,11 +32,12 @@ import ( "testing" "time" + "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" + "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/onlineddl" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" throttlebase "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/base" "github.com/stretchr/testify/assert" @@ -50,10 +56,10 @@ var ( extendedMigrationWait = 20 * time.Second hostname = "localhost" - keyspaceName = "ks" + keyspaceName = "mysql" cell = "zone1" schemaChangeDirectory = "" - totalTableCount = 4 + totalTableCount = 6 createTable = ` CREATE TABLE %s ( id bigint(20) NOT NULL, @@ -191,7 +197,7 @@ func TestMain(m *testing.M) { VSchema: vSchema, } - if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { + if err := clusterInstance.StartKeyspace(*keyspace, []string{"0"}, 1, false); err != nil { return 1, err } @@ -203,8 +209,9 @@ func TestMain(m *testing.M) { // ensure it is torn down during cluster TearDown clusterInstance.VtgateProcess = *vtgateInstance vtParams = mysql.ConnParams{ - Host: clusterInstance.Hostname, - Port: clusterInstance.VtgateMySQLPort, + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + DbName: keyspaceName, } return m.Run(), nil @@ -245,7 +252,7 @@ func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) shards = clusterInstance.Keyspaces[0].Shards - require.Equal(t, 2, len(shards)) + require.Equal(t, 1, len(shards)) for _, shard := range shards { require.Equal(t, 2, len(shard.Vttablets)) } @@ -260,7 +267,7 @@ func TestSchemaChange(t *testing.T) { testWithInitialSchema(t) t.Run("alter non_online", func(t *testing.T) { - _ = testOnlineDDLStatement(t, alterTableNormalStatement, string(schema.DDLStrategyDirect), providedUUID, providedMigrationContext, "vtctl", "non_online", "", false) + _ = testOnlineDDLStatement(t, 
alterTableNormalStatement, string(schema.DDLStrategyDirect), providedUUID, providedMigrationContext, "vtgate", "non_online", "", false) insertRows(t, 2) testRows(t) }) @@ -290,36 +297,9 @@ func TestSchemaChange(t *testing.T) { assert.Equal(t, int64(-1), retainArtifactSeconds) } }) - t.Run("successful online alter, vtctl", func(t *testing.T) { - insertRows(t, 2) - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "online", providedUUID, providedMigrationContext, "vtctl", "vrepl_col", "", false) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - testRows(t) - testMigrationRowCount(t, uuid) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) - }) - t.Run("successful online alter, vtctl, explicit UUID", func(t *testing.T) { - insertRows(t, 2) - providedUUID = "00000000_51c9_11ec_9cf2_0a43f95f28a3" - providedMigrationContext = "endtoend:0000-1111" - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtctl", "vrepl_col", "", false) - assert.Equal(t, providedUUID, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - testRows(t) - testMigrationRowCount(t, uuid) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckMigrationArtifacts(t, &vtParams, shards, uuid, true) - }) - t.Run("duplicate migration, implicitly ignored", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "online", providedUUID, providedMigrationContext, "vtctl", "vrepl_col", "", true) - assert.Equal(t, providedUUID, uuid) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - }) + t.Run("fail duplicate migration with 
different context", func(t *testing.T) { - _ = testOnlineDDLStatement(t, alterTableTrivialStatement, "online", providedUUID, "endtoend:different-context-0000", "vtctl", "vrepl_col", "rejected", true) + _ = testOnlineDDLStatement(t, alterTableTrivialStatement, "online", providedUUID, "endtoend:different-context-0000", "vtgate", "vrepl_col", "rejected", true) }) providedUUID = "" providedMigrationContext = "" @@ -345,6 +325,7 @@ func TestSchemaChange(t *testing.T) { // In this endtoend test we test both direct tablet API for throttling, as well as VTGate queries. // - VTGate queries (`ALTER VITESS_MIGRATION THROTTLE ALL ...`) are sent to all relevant shards/tablets via QueryExecutor // - tablet API calls have to be sent per-shard to the primary tablet of that shard + t.Run("throttled migration", func(t *testing.T) { // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` insertRows(t, 2) @@ -467,34 +448,11 @@ func TestSchemaChange(t *testing.T) { wg.Wait() onlineddl.CheckCancelAllMigrations(t, &vtParams, len(shards)*count) }) - t.Run("cancel all migrations: some migrations to cancel via vtctl", func(t *testing.T) { - // Use VTGate for throttling, issue a `ALTER VITESS_MIGRATION THROTTLE ALL ...` - onlineddl.ThrottleAllMigrations(t, &vtParams) - defer onlineddl.UnthrottleAllMigrations(t, &vtParams) - onlineddl.CheckThrottledApps(t, &vtParams, onlineDDLThrottlerAppName, true) - - // spawn n migrations; cancel them via cancel-all - var wg sync.WaitGroup - count := 4 - for i := 0; i < count; i++ { - wg.Add(1) - go func() { - defer wg.Done() - _ = testOnlineDDLStatement(t, alterTableThrottlingStatement, "online", providedUUID, providedMigrationContext, "vtgate", "vrepl_col", "", false) - }() - } - wg.Wait() - // cancelling via vtctl does not return values. We CANCEL ALL via vtctl, then validate via VTGate that nothing remains to be cancelled. 
- onlineddl.CheckCancelAllMigrationsViaVtctl(t, &clusterInstance.VtctlclientProcess, keyspaceName) - onlineddl.CheckCancelAllMigrations(t, &vtParams, 0) - }) - // reparent shard -80 to replica // and then reparent it back to original state // (two pretty much identical tests, the point is to end up with original state) for _, currentPrimaryTabletIndex := range []int{0, 1} { currentPrimaryTablet := shards[0].Vttablets[currentPrimaryTabletIndex] - reparentTablet := shards[0].Vttablets[1-currentPrimaryTabletIndex] t.Run(fmt.Sprintf("PlannedReparentShard via throttling %d/2", (currentPrimaryTabletIndex+1)), func(t *testing.T) { insertRows(t, 2) @@ -522,46 +480,16 @@ func TestSchemaChange(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) - t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { + t.Run("wait for vreplication to run on shard 0", func(t *testing.T) { vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") require.Contains(t, []string{"Copying", "Running"}, vreplStatus) }) - t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) - }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) testRows(t) }) - t.Run("Check tablet", func(t *testing.T) { - // onlineddl.Executor marks this migration with its tablet alias - // reminder that onlineddl.Executor runs on the primary tablet. 
- rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - shard := row["shard"].ToString() - tablet := row["tablet"].ToString() - - switch shard { - case "-80": - require.Equal(t, currentPrimaryTablet.Alias, tablet) - case "80-": - require.Equal(t, shards[1].Vttablets[0].Alias, tablet) - default: - require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) - } - } - }) - t.Run("PRS shard -80", func(t *testing.T) { - // migration has started and is throttled. We now run PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) - require.NoError(t, err, "failed PRS: %v", err) - rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") - onlineddl.PrintQueryResult(os.Stdout, rs) - }) t.Run("unthrottle", func(t *testing.T) { for i := range shards { var body string @@ -584,33 +512,6 @@ func TestSchemaChange(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) }) - - t.Run("Check tablet post PRS", func(t *testing.T) { - // onlineddl.Executor will find that a vrepl migration started in a different tablet. - // it will own the tablet and will update 'tablet' column in mysql.schema_migrations with its own - // (promoted primary) tablet alias. 
- rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - shard := row["shard"].ToString() - tablet := row["tablet"].ToString() - - switch shard { - case "-80": - // PRS for this tablet, we promoted tablet[1] - require.Equal(t, reparentTablet.Alias, tablet) - case "80-": - // No PRS for this tablet - require.Equal(t, shards[1].Vttablets[0].Alias, tablet) - default: - require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) - } - } - - onlineddl.CheckRetryPartialMigration(t, &vtParams, uuid, 1) - // Now it should complete on the failed shard - _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete) - }) }) } @@ -631,46 +532,15 @@ func TestSchemaChange(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) }) - t.Run("wait for vreplication to run on shard -80", func(t *testing.T) { + t.Run("wait for vreplication to run on shard 0", func(t *testing.T) { vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, currentPrimaryTablet, uuid, normalMigrationWait, "Copying", "Running") require.Contains(t, []string{"Copying", "Running"}, vreplStatus) }) - t.Run("wait for vreplication to run on shard 80-", func(t *testing.T) { - vreplStatus := onlineddl.WaitForVReplicationStatus(t, &vtParams, shards[1].Vttablets[0], uuid, normalMigrationWait, "Copying", "Running") - require.Contains(t, []string{"Copying", "Running"}, vreplStatus) - }) t.Run("check status again", func(t *testing.T) { // again see that we're still 'running' onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning) testRows(t) }) - - t.Run("Check tablet", func(t *testing.T) { - // onlineddl.Executor marks this migration with its tablet alias - // reminder that 
onlineddl.Executor runs on the primary tablet. - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - shard := row["shard"].ToString() - tablet := row["tablet"].ToString() - - switch shard { - case "-80": - require.Equal(t, currentPrimaryTablet.Alias, tablet) - case "80-": - require.Equal(t, shards[1].Vttablets[0].Alias, tablet) - default: - require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) - } - } - }) - t.Run("PRS shard -80", func(t *testing.T) { - // migration has started and completion is postponed. We now PRS - err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "--", "--keyspace_shard", keyspaceName+"/-80", "--new_primary", reparentTablet.Alias) - require.NoError(t, err, "failed PRS: %v", err) - rs := onlineddl.VtgateExecQuery(t, &vtParams, "show vitess_tablets", "") - onlineddl.PrintQueryResult(os.Stdout, rs) - }) t.Run("complete and expect completion", func(t *testing.T) { query := fmt.Sprintf("select * from mysql.vreplication where workflow ='%s'", uuid) rs, err := reparentTablet.VttabletProcess.QueryTablet(query, "", true) @@ -682,56 +552,8 @@ func TestSchemaChange(t *testing.T) { _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) }) - - t.Run("Check tablet post PRS", func(t *testing.T) { - // onlineddl.Executor will find that a vrepl migration started in a different tablet. - // it will own the tablet and will update 'tablet' column in mysql.schema_migrations with its own - // (promoted primary) tablet alias. 
- rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - shard := row["shard"].ToString() - tablet := row["tablet"].ToString() - - switch shard { - case "-80": - // PRS for this tablet - require.Equal(t, reparentTablet.Alias, tablet) - case "80-": - // No PRS for this tablet - require.Equal(t, shards[1].Vttablets[0].Alias, tablet) - default: - require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) - } - } - - onlineddl.CheckRetryPartialMigration(t, &vtParams, uuid, 1) - // Now it should complete on the failed shard - _ = onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, extendedMigrationWait, schema.OnlineDDLStatusComplete) - }) }) } - - t.Run("Online DROP, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLDropTableStatement, "online", providedUUID, providedMigrationContext, "vtctl", "", "", false) - t.Run("test ready to complete", func(t *testing.T) { - rs := onlineddl.ReadMigrations(t, &vtParams, uuid) - require.NotNil(t, rs) - for _, row := range rs.Named().Rows { - readyToComplete := row.AsInt64("ready_to_complete", 0) - assert.Equal(t, int64(1), readyToComplete) - } - }) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - t.Run("Online CREATE, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "vitess", providedUUID, providedMigrationContext, "vtctl", "online_ddl_create_col", "", false) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) t.Run("Online DROP TABLE IF EXISTS, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, 
onlineDDLDropTableIfExistsStatement, "online ", providedUUID, providedMigrationContext, "vtgate", "", "", false) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) @@ -740,21 +562,13 @@ func TestSchemaChange(t *testing.T) { // this table existed checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) }) - t.Run("Online CREATE, vtctl, extra flags", func(t *testing.T) { - // the flags are meaningless to this migration. The test just validates that they don't get in the way. - uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "vitess --prefer-instant-ddl --allow-zero-in-date", providedUUID, providedMigrationContext, "vtctl", "online_ddl_create_col", "", false) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) + t.Run("Online DROP TABLE IF EXISTS, vtgate, extra flags", func(t *testing.T) { // the flags are meaningless to this migration. The test just validates that they don't get in the way. 
uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "vitess --prefer-instant-ddl --allow-zero-in-date", providedUUID, providedMigrationContext, "vtgate", "", "", false) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - // this table existed - checkTables(t, schema.OnlineDDLToGCUUID(uuid), 1) }) t.Run("Online DROP TABLE IF EXISTS for nonexistent table, vtgate", func(t *testing.T) { uuid := testOnlineDDLStatement(t, onlineDDLDropTableIfExistsStatement, "online", providedUUID, providedMigrationContext, "vtgate", "", "", false) @@ -785,85 +599,109 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) }) - t.Run("Online CREATE, vtctl", func(t *testing.T) { - uuid := testOnlineDDLStatement(t, onlineDDLCreateTableStatement, "vitess", providedUUID, providedMigrationContext, "vtctl", "online_ddl_create_col", "", false) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) - onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) - }) - // Technically the next test should belong in onlineddl_revert suite. 
But we're tking advantage of setup and functionality existing in this tets: // - two shards as opposed to one // - tablet throttling t.Run("Revert a migration completed on one shard and cancelled on another", func(t *testing.T) { // shard 0 will run normally, shard 1 will be throttled - defer unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) + defer unthrottleApp(shards[0].Vttablets[0], onlineDDLThrottlerAppName) t.Run("throttle shard 1", func(t *testing.T) { - body, err := throttleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) + body, err := throttleApp(shards[0].Vttablets[0], onlineDDLThrottlerAppName) assert.NoError(t, err) assert.Contains(t, body, onlineDDLThrottlerAppName) }) - var uuid string + var uuid4, uuid5 string t.Run("run migrations, expect 1st to complete, 2nd to be running", func(t *testing.T) { - uuid = testOnlineDDLStatement(t, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + uuid4 = testOnlineDDLStatementOnTableID(t, 4, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + uuid5 = testOnlineDDLStatementOnTableID(t, 5, alterTableTrivialStatement, "vitess", providedUUID, providedMigrationContext, "vtgate", "test_val", "", true) + body, err := throttleApp(shards[0].Vttablets[0], uuid5) + defer unthrottleApp(shards[0].Vttablets[0], uuid5) + assert.NoError(t, err) + assert.Contains(t, body, uuid5) + unthrottleApp(shards[0].Vttablets[0], onlineDDLThrottlerAppName) { - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], uuid, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], uuid4, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], 
uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], uuid4, schema.OnlineDDLStatusComplete) } { // shard 1 is throttled - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], uuid, normalMigrationWait, schema.OnlineDDLStatusRunning) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], uuid5, normalMigrationWait, schema.OnlineDDLStatusRunning) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], uuid, schema.OnlineDDLStatusRunning) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], uuid5, schema.OnlineDDLStatusRunning) } }) t.Run("check cancel migration", func(t *testing.T) { onlineddl.CheckCancelAllMigrations(t, &vtParams, 1) }) - t.Run("unthrottle shard 1", func(t *testing.T) { - body, err := unthrottleApp(shards[1].Vttablets[0], onlineDDLThrottlerAppName) + t.Run("unthrottle table 5", func(t *testing.T) { + body, err := unthrottleApp(shards[0].Vttablets[0], uuid5) assert.NoError(t, err) - assert.Contains(t, body, onlineDDLThrottlerAppName) + assert.Contains(t, body, uuid5) }) - var revertUUID string + var revertUUID4 string + var revertUUID5 string t.Run("issue revert migration", func(t *testing.T) { - revertQuery := fmt.Sprintf("revert vitess_migration '%s'", uuid) - rs := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery, "") - require.NotNil(t, rs) - row := rs.Named().Row() + revertQuery4 := fmt.Sprintf("revert vitess_migration '%s'", uuid4) + revertQuery5 := fmt.Sprintf("revert vitess_migration '%s'", uuid5) + + rs4 := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery4, "") + rs5 := onlineddl.VtgateExecQuery(t, &vtParams, revertQuery5, "") + + require.NotNil(t, rs4) + row := rs4.Named().Row() + require.NotNil(t, row) + revertUUID4 = row.AsString("uuid", "") + assert.NotEmpty(t, revertUUID4) + + require.NotNil(t, rs5) + row = rs5.Named().Row() require.NotNil(t, row) - revertUUID = 
row.AsString("uuid", "") - assert.NotEmpty(t, revertUUID) + revertUUID5 = row.AsString("uuid", "") + assert.NotEmpty(t, revertUUID5) }) t.Run("expect one revert successful, another failed", func(t *testing.T) { { // shard 0 migration was complete. Revert should be successful - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], revertUUID, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], revertUUID4, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID, schema.OnlineDDLStatusComplete) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID4, schema.OnlineDDLStatusComplete) } { // shard 0 migration was cancelled. Revert should not be possible - status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[1:], revertUUID, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards[:1], revertUUID5, normalMigrationWait, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) - onlineddl.CheckMigrationStatus(t, &vtParams, shards[1:], revertUUID, schema.OnlineDDLStatusFailed) + onlineddl.CheckMigrationStatus(t, &vtParams, shards[:1], revertUUID5, schema.OnlineDDLStatusFailed) } }) t.Run("expect two rows in SHOW VITESS_MIGRATIONS", func(t *testing.T) { // This validates that the shards are reflected correctly in output of SHOW VITESS_MIGRATIONS - rs := onlineddl.ReadMigrations(t, &vtParams, revertUUID) - require.NotNil(t, rs) - require.Equal(t, 2, len(rs.Rows)) - for _, row := range rs.Named().Rows { + rs4 := onlineddl.ReadMigrations(t, &vtParams, revertUUID4) + rs5 := onlineddl.ReadMigrations(t, 
&vtParams, revertUUID5) + + require.NotNil(t, rs4) + require.NotNil(t, rs5) + require.Equal(t, 1, len(rs4.Rows)) + require.Equal(t, 1, len(rs5.Rows)) + for _, row := range rs4.Named().Rows { shard := row["shard"].ToString() status := row["migration_status"].ToString() switch shard { - case "-80": + case "0": require.Equal(t, string(schema.OnlineDDLStatusComplete), status) - case "80-": + default: + require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) + } + } + + for _, row := range rs5.Named().Rows { + shard := row["shard"].ToString() + status := row["migration_status"].ToString() + + switch shard { + case "0": require.Equal(t, string(schema.OnlineDDLStatusFailed), status) default: require.NoError(t, fmt.Errorf("unexpected shard name: %s", shard)) @@ -871,6 +709,7 @@ func TestSchemaChange(t *testing.T) { } }) }) + t.Run("summary: validate sequential migration IDs", func(t *testing.T) { onlineddl.ValidateSequentialMigrationIDs(t, &vtParams, shards) }) @@ -931,12 +770,11 @@ func testWithInitialSchema(t *testing.T) { } // Check if 4 tables are created - checkTables(t, "", totalTableCount) + checkTables(t, "vt_onlineddl_test_", totalTableCount) } -// testOnlineDDLStatement runs an online DDL, ALTER statement -func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, providedUUIDList string, providedMigrationContext string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) { - tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) +func testOnlineDDLStatementOnTableID(t *testing.T, tableID int, alterStatement string, ddlStrategy string, providedUUIDList string, providedMigrationContext string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) { + tableName := fmt.Sprintf("vt_onlineddl_test_%02d", tableID) sqlQuery := fmt.Sprintf(alterStatement, tableName) if executeStrategy == "vtgate" { row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, 
sqlQuery, "").Named().Row() @@ -975,6 +813,11 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str return uuid } +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, providedUUIDList string, providedMigrationContext string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) { + return testOnlineDDLStatementOnTableID(t, 3, alterStatement, ddlStrategy, providedUUIDList, providedMigrationContext, executeStrategy, expectHint, expectError, skipWait) +} + // checkTables checks the number of tables in the first two shards. func checkTables(t *testing.T, showTableName string, expectCount int) { for i := range clusterInstance.Keyspaces[0].Shards { @@ -986,6 +829,9 @@ func checkTables(t *testing.T, showTableName string, expectCount int) { func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + for _, row := range queryResult.Rows { + t.Logf("row : %v", row) + } require.Nil(t, err) assert.Equal(t, expectCount, len(queryResult.Rows)) } diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index 9f45aa0c38..fb567ac3de 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -52,7 +52,7 @@ var ( ) const ( - workflowConfigDir = "../.github/workflows" + workflowConfigDir = "../.github/workflows/archive" unitTestTemplate = "templates/unit_test.tpl" diff --git a/tools/wesql_onlineddl_scheduler.sh b/tools/wesql_onlineddl_scheduler.sh new file mode 100755 index 0000000000..292907f05a --- /dev/null +++ b/tools/wesql_onlineddl_scheduler.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# run tests against a local cluster +export VTDATAROOT="/tmp/" +source build.env +go run test.go -docker=false -follow -shard 
onlineddl_scheduler \ No newline at end of file diff --git a/tools/wesql_onlineddl_vrepl.sh b/tools/wesql_onlineddl_vrepl.sh new file mode 100755 index 0000000000..42e43afff7 --- /dev/null +++ b/tools/wesql_onlineddl_vrepl.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# run tests against a local cluster +export VTDATAROOT="/tmp/" +source build.env +go run test.go -docker=false -follow -shard onlineddl_vrepl \ No newline at end of file